core_validation.cpp revision a01b5eb150981aad061238e64b173d0da8c11140
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

using namespace std;
// TODO : CB really needs its own class and files so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
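
// Editorial sketch (not called by the layer): raw SPIR-V words can be decoded with the
// iterator above. The first five words are the module header, so decoding starts at
// words.begin() + 5, and each ++ advances by the instruction's embedded length field.
static inline uint32_t example_count_entrypoints(std::vector<uint32_t> const &words) {
    uint32_t count = 0;
    spirv_inst_iter it(words.begin(), words.begin() + 5);
    spirv_inst_iter end_it(words.begin(), words.end());
    for (; it != end_it; ++it) {
        if (it.opcode() == spv::OpEntryPoint)
            count++;
    }
    return count;
}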

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc. requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
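
// Editorial sketch: def_index lets get_def() jump from an <id> straight to its defining
// instruction instead of rescanning the word stream, e.g. to test whether an id names a
// pointer type.
static inline bool example_id_is_pointer(shader_module const &module, unsigned id) {
    auto def = module.get_def(id);
    return def != module.end() && def.opcode() == spv::OpTypePointer;
}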

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else VK_NULL_HANDLE
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view info ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node)
            return &img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node)
            return &buff_node->mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                       char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = getImageNode(dev_data, image);
    if (image_node) {
        skipCall = validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                        char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        skipCall = validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}
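
// Editorial sketch of a typical caller (this wrapper is hypothetical; the helper and its
// signature are the ones defined above): require, with strict matching, that a buffer
// used as a copy source was created with TRANSFER_SRC usage.
static inline bool example_validate_copy_src(layer_data *dev_data, VkBuffer buffer) {
    return validate_buffer_usage_flags(dev_data, buffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                       "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
}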

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = getImageNode(dev_data, image);
        if (image_node && !image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = getImageNode(dev_data, image);
        if (image_node) {
            image_node->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if ((pMemObjInfo->commandBufferBindings.size()) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->commandBufferBindings.size() > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->objBindings.size() > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                    "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                           ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                    apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->objBindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}
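
// Editorial note: binding the same VkImage or VkBuffer to memory a second time takes the
// pPrevBinding != NULL path above and reports MEMTRACK_REBIND_OBJECT rather than silently
// rebinding the object.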

// For NULL mem case, clear any previous binding. Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return skipCall: true if a validation error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->objBindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                        const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object 0x%" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->commandBufferBindings.size() + mem_info->objBindings.size());
        if (0 != mem_info->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->objBindings.size());
        if (mem_info->objBindings.size() > 0) {
            for (auto obj : mem_info->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->commandBufferBindings.size());
        if (mem_info->commandBufferBindings.size() > 0) {
            for (auto cb : mem_info->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
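
// Editorial note: the 1u << insn.word(1) trick works because the SPIR-V execution models
// line up with VkShaderStageFlagBits: ExecutionModelVertex == 0 maps to
// VK_SHADER_STAGE_VERTEX_BIT (1 << 0), through ExecutionModelGLCompute == 5 mapping to
// VK_SHADER_STAGE_COMPUTE_BIT (1 << 5).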

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
            considering here, OR -- specialize on the fly now.
            */
        return 1;
    }

    return value.word(3);
}
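
// Worked example (editorial): for "%seven = OpConstant %uint 7", word(1) is the result
// type id, word(2) the result id, and word(3) the literal operand, so the function above
// returns 7. Constants wider than 32 bits carry additional literal words.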


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
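
// Worked example (editorial): under the vector rule above, a dvec4 (4 x 64-bit float)
// consumes (64 * 4 + 127) / 128 = 2 locations, while a vec4 (4 x 32-bit) consumes
// (32 * 4 + 127) / 128 = 1.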

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}
1301
1302static void collect_interface_block_members(shader_module const *src,
1303                                            std::map<location_t, interface_var> &out,
1304                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1305                                            uint32_t id, uint32_t type_id, bool is_patch) {
1306    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1307    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1308    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1309        /* this isn't an interface block. */
1310        return;
1311    }
1312
1313    std::unordered_map<unsigned, unsigned> member_components;
1314
1315    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1316    for (auto insn : *src) {
1317        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1318            unsigned member_index = insn.word(2);
1319
1320            if (insn.word(3) == spv::DecorationComponent) {
1321                unsigned component = insn.word(4);
1322                member_components[member_index] = component;
1323            }
1324        }
1325    }
1326
1327    /* Second pass -- produce the output, from Location decorations */
1328    for (auto insn : *src) {
1329        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1330            unsigned member_index = insn.word(2);
1331            unsigned member_type_id = type.word(2 + member_index);
1332
1333            if (insn.word(3) == spv::DecorationLocation) {
1334                unsigned location = insn.word(4);
1335                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1336                auto component_it = member_components.find(member_index);
1337                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1338
1339                for (unsigned int offset = 0; offset < num_locations; offset++) {
1340                    interface_var v;
1341                    v.id = id;
1342                    /* TODO: member index in interface_var too? */
1343                    v.type_id = member_type_id;
1344                    v.offset = offset;
1345                    v.is_patch = is_patch;
1346                    v.is_block_member = true;
1347                    out[std::make_pair(location + offset, component)] = v;
1348                }
1349            }
1350        }
1351    }
1352}
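
// Illustrative sketch of what collect_interface_block_members handles
// (hypothetical GLSL): an output block whose members carry explicit locations,
//
//     out Block {
//         layout(location = 0) vec4 a;
//         layout(location = 2) flat int b[2];
//     } blk;
//
// yields OpMemberDecorate ... Location 0 / Location 2 on the struct type; the
// second pass above then emits interface_vars keyed (0,0) for a, and (2,0) and
// (3,0) for b, since the int[2] member consumes two locations.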
1353
1354static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1355                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1356                                          bool is_array_of_verts) {
1357    std::unordered_map<unsigned, unsigned> var_locations;
1358    std::unordered_map<unsigned, unsigned> var_builtins;
1359    std::unordered_map<unsigned, unsigned> var_components;
1360    std::unordered_map<unsigned, unsigned> blocks;
1361    std::unordered_map<unsigned, unsigned> var_patch;
1362
1363    for (auto insn : *src) {
1364
1365        /* We consider two interface models: SSO rendezvous-by-location, and
1366         * builtins. Complain about anything that fits neither model.
1367         */
1368        if (insn.opcode() == spv::OpDecorate) {
1369            if (insn.word(2) == spv::DecorationLocation) {
1370                var_locations[insn.word(1)] = insn.word(3);
1371            }
1372
1373            if (insn.word(2) == spv::DecorationBuiltIn) {
1374                var_builtins[insn.word(1)] = insn.word(3);
1375            }
1376
1377            if (insn.word(2) == spv::DecorationComponent) {
1378                var_components[insn.word(1)] = insn.word(3);
1379            }
1380
1381            if (insn.word(2) == spv::DecorationBlock) {
1382                blocks[insn.word(1)] = 1;
1383            }
1384
1385            if (insn.word(2) == spv::DecorationPatch) {
1386                var_patch[insn.word(1)] = 1;
1387            }
1388        }
1389    }
1390
1391    /* TODO: handle grouped decorations */
1392    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1393     * have the same location, and we DON'T want to clobber. */
1394
1395    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1396       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1397       the word to determine which word contains the terminator. */
1398    uint32_t word = 3;
1399    while (entrypoint.word(word) & 0xff000000u) {
1400        ++word;
1401    }
1402    ++word;
1403
1404    for (; word < entrypoint.len(); word++) {
1405        auto insn = src->get_def(entrypoint.word(word));
1406        assert(insn != src->end());
1407        assert(insn.opcode() == spv::OpVariable);
1408
1409        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1410            unsigned id = insn.word(2);
1411            unsigned type = insn.word(1);
1412
1413            int location = value_or_default(var_locations, id, -1);
1414            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK; defaults to 0 */
1416            bool is_patch = var_patch.find(id) != var_patch.end();
1417
1418            /* All variables and interface block members in the Input or Output storage classes
1419             * must be decorated with either a builtin or an explicit location.
1420             *
1421             * TODO: integrate the interface block support here. For now, don't complain --
1422             * a valid SPIRV module will only hit this path for the interface block case, as the
1423             * individual members of the type are decorated, rather than variable declarations.
1424             */
1425
1426            if (location != -1) {
1427                /* A user-defined interface variable, with a location. Where a variable
1428                 * occupied multiple locations, emit one result for each. */
1429                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1430                for (unsigned int offset = 0; offset < num_locations; offset++) {
1431                    interface_var v;
1432                    v.id = id;
1433                    v.type_id = type;
1434                    v.offset = offset;
1435                    v.is_patch = is_patch;
1436                    v.is_block_member = false;
1437                    out[std::make_pair(location + offset, component)] = v;
1438                }
1439            } else if (builtin == -1) {
1440                /* An interface block instance */
1441                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
1442            }
1443        }
1444    }
1445}
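
// Example of the OpEntryPoint word layout that the name-skipping loop in
// collect_interface_by_location relies on (illustrative disassembly):
//
//     OpEntryPoint Fragment %main "main" %frag_color %uv
//
// word(1) is the execution model and word(2) the entry point id; the literal
// "main" packs its four bytes into word 3 exactly, so the null terminator
// lands alone in word 4. The loop stops on word 4 (high byte zero) and the
// interface ids %frag_color and %uv begin at word 5.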
1446
1447static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
1448                                                 std::unordered_set<uint32_t> const &accessible_ids,
1449                                                 std::map<descriptor_slot_t, interface_var> &out) {
1450
1451    std::unordered_map<unsigned, unsigned> var_sets;
1452    std::unordered_map<unsigned, unsigned> var_bindings;
1453
1454    for (auto insn : *src) {
1455        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1456         * DecorationDescriptorSet and DecorationBinding.
1457         */
1458        if (insn.opcode() == spv::OpDecorate) {
1459            if (insn.word(2) == spv::DecorationDescriptorSet) {
1460                var_sets[insn.word(1)] = insn.word(3);
1461            }
1462
1463            if (insn.word(2) == spv::DecorationBinding) {
1464                var_bindings[insn.word(1)] = insn.word(3);
1465            }
1466        }
1467    }
1468
1469    for (auto id : accessible_ids) {
1470        auto insn = src->get_def(id);
1471        assert(insn != src->end());
1472
1473        if (insn.opcode() == spv::OpVariable &&
1474            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1475            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1476            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1477
1478            auto existing_it = out.find(std::make_pair(set, binding));
1479            if (existing_it != out.end()) {
1480                /* conflict within spv image */
1481                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1482                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1483                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1484                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1485                        existing_it->first.second);
1486            }
1487
1488            interface_var v;
1489            v.id = insn.word(2);
1490            v.type_id = insn.word(1);
1491            v.offset = 0;
1492            v.is_patch = false;
1493            v.is_block_member = false;
1494            out[std::make_pair(set, binding)] = v;
1495        }
1496    }
1497}
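
// Hypothetical GLSL producing the decorations consumed above (illustrative):
//
//     layout(set = 1, binding = 2) uniform sampler2D tex;
//
// yields OpDecorate %tex DescriptorSet 1 and OpDecorate %tex Binding 2, so the
// variable lands in descriptor slot (1,2). A variable missing either
// decoration is treated as set 0 or binding 0 via value_or_default.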
1498
1499static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1500                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1501                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1502                                              shader_stage_attributes const *consumer_stage) {
1503    std::map<location_t, interface_var> outputs;
1504    std::map<location_t, interface_var> inputs;
1505
1506    bool pass = true;
1507
1508    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1509    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1510
1511    auto a_it = outputs.begin();
1512    auto b_it = inputs.begin();
1513
1514    /* maps sorted by key (location); walk them together to find mismatches */
1515    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1516        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1517        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1518        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1519        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1520
1521        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1522            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1523                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1524                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1525                        a_first.second, consumer_stage->name)) {
1526                pass = false;
1527            }
1528            a_it++;
1529        } else if (a_at_end || a_first > b_first) {
1530            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1531                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1532                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1533                        producer_stage->name)) {
1534                pass = false;
1535            }
1536            b_it++;
1537        } else {
1538            // subtleties of arrayed interfaces:
1539            // - if is_patch, then the member is not arrayed, even though the interface may be.
1540            // - if is_block_member, then the extra array level of an arrayed interface is not
1541            //   expressed in the member type -- it's expressed in the block type.
1542            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1543                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1544                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1545                             true)) {
1546                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1547                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1548                            a_first.first, a_first.second,
1549                            describe_type(producer, a_it->second.type_id).c_str(),
1550                            describe_type(consumer, b_it->second.type_id).c_str())) {
1551                    pass = false;
1552                }
1553            }
1554            if (a_it->second.is_patch != b_it->second.is_patch) {
1555                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1556                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1557                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1558                            "per-%s in %s stage", a_first.first, a_first.second,
1559                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1560                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1561                    pass = false;
1562                }
1563            }
1564            a_it++;
1565            b_it++;
1566        }
1567    }
1568
1569    return pass;
1570}
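
// Worked example of the two-iterator walk above (hypothetical stages): if a
// vertex shader declares `layout(location = 0) out vec4 a;` while the fragment
// shader declares only `layout(location = 1) in vec4 b;`, location 0 trips the
// SHADER_CHECKER_OUTPUT_NOT_CONSUMED performance warning, and location 1 then
// trips the SHADER_CHECKER_INPUT_NOT_PRODUCED error, since each key appears in
// only one of the two sorted maps.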
1571
1572enum FORMAT_TYPE {
1573    FORMAT_TYPE_UNDEFINED,
1574    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1575    FORMAT_TYPE_SINT,
1576    FORMAT_TYPE_UINT,
1577};
1578
1579static unsigned get_format_type(VkFormat fmt) {
1580    switch (fmt) {
1581    case VK_FORMAT_UNDEFINED:
1582        return FORMAT_TYPE_UNDEFINED;
1583    case VK_FORMAT_R8_SINT:
1584    case VK_FORMAT_R8G8_SINT:
1585    case VK_FORMAT_R8G8B8_SINT:
1586    case VK_FORMAT_R8G8B8A8_SINT:
1587    case VK_FORMAT_R16_SINT:
1588    case VK_FORMAT_R16G16_SINT:
1589    case VK_FORMAT_R16G16B16_SINT:
1590    case VK_FORMAT_R16G16B16A16_SINT:
1591    case VK_FORMAT_R32_SINT:
1592    case VK_FORMAT_R32G32_SINT:
1593    case VK_FORMAT_R32G32B32_SINT:
1594    case VK_FORMAT_R32G32B32A32_SINT:
1595    case VK_FORMAT_R64_SINT:
1596    case VK_FORMAT_R64G64_SINT:
1597    case VK_FORMAT_R64G64B64_SINT:
1598    case VK_FORMAT_R64G64B64A64_SINT:
1599    case VK_FORMAT_B8G8R8_SINT:
1600    case VK_FORMAT_B8G8R8A8_SINT:
1601    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1602    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1603    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1604        return FORMAT_TYPE_SINT;
1605    case VK_FORMAT_R8_UINT:
1606    case VK_FORMAT_R8G8_UINT:
1607    case VK_FORMAT_R8G8B8_UINT:
1608    case VK_FORMAT_R8G8B8A8_UINT:
1609    case VK_FORMAT_R16_UINT:
1610    case VK_FORMAT_R16G16_UINT:
1611    case VK_FORMAT_R16G16B16_UINT:
1612    case VK_FORMAT_R16G16B16A16_UINT:
1613    case VK_FORMAT_R32_UINT:
1614    case VK_FORMAT_R32G32_UINT:
1615    case VK_FORMAT_R32G32B32_UINT:
1616    case VK_FORMAT_R32G32B32A32_UINT:
1617    case VK_FORMAT_R64_UINT:
1618    case VK_FORMAT_R64G64_UINT:
1619    case VK_FORMAT_R64G64B64_UINT:
1620    case VK_FORMAT_R64G64B64A64_UINT:
1621    case VK_FORMAT_B8G8R8_UINT:
1622    case VK_FORMAT_B8G8R8A8_UINT:
1623    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1624    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1625    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1626        return FORMAT_TYPE_UINT;
1627    default:
1628        return FORMAT_TYPE_FLOAT;
1629    }
1630}
1631
1632/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1633 * for comparison to a VkFormat's characterization above. */
1634static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1635    auto insn = src->get_def(type);
1636    assert(insn != src->end());
1637
1638    switch (insn.opcode()) {
1639    case spv::OpTypeInt:
1640        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1641    case spv::OpTypeFloat:
1642        return FORMAT_TYPE_FLOAT;
1643    case spv::OpTypeVector:
1644        return get_fundamental_type(src, insn.word(2));
1645    case spv::OpTypeMatrix:
1646        return get_fundamental_type(src, insn.word(2));
1647    case spv::OpTypeArray:
1648        return get_fundamental_type(src, insn.word(2));
1649    case spv::OpTypePointer:
1650        return get_fundamental_type(src, insn.word(3));
1651    default:
1652        return FORMAT_TYPE_UNDEFINED;
1653    }
1654}
1655
1656static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1657    uint32_t bit_pos = u_ffs(stage);
1658    return bit_pos - 1;
1659}
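
// Example (assuming u_ffs returns the 1-based index of the lowest set bit):
// VK_SHADER_STAGE_VERTEX_BIT (0x1) maps to stage id 0, and
// VK_SHADER_STAGE_FRAGMENT_BIT (0x10) maps to stage id 4, which in bit order
// lines up with the entries of shader_stage_attribs above.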
1660
1661static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1662    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1663     * each binding should be specified only once.
1664     */
1665    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1666    bool pass = true;
1667
1668    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1669        auto desc = &vi->pVertexBindingDescriptions[i];
1670        auto &binding = bindings[desc->binding];
1671        if (binding) {
1672            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1673                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1674                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1675                pass = false;
1676            }
1677        } else {
1678            binding = desc;
1679        }
1680    }
1681
1682    return pass;
1683}
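
// Illustrative trigger (hypothetical): two entries in
// pVertexBindingDescriptions that both name binding 0 (say, strides 16 and 32)
// hit the non-null `binding` branch on the second entry and report
// SHADER_CHECKER_INCONSISTENT_VI.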
1684
1685static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1686                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1687    std::map<location_t, interface_var> inputs;
1688    bool pass = true;
1689
1690    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);
1691
1692    /* Build index by location */
1693    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1694    if (vi) {
1695        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1696            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1697            for (auto j = 0u; j < num_locations; j++) {
1698                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1699            }
1700        }
1701    }
1702
1703    auto it_a = attribs.begin();
1704    auto it_b = inputs.begin();
1705
1706    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1707        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1708        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1709        auto a_first = a_at_end ? 0 : it_a->first;
1710        auto b_first = b_at_end ? 0 : it_b->first.first;
1711        if (!a_at_end && (b_at_end || a_first < b_first)) {
1712            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1713                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1714                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1715                pass = false;
1716            }
1717            it_a++;
1718        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1719            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "VS consumes input at location %d but it is not provided",
1721                        b_first)) {
1722                pass = false;
1723            }
1724            it_b++;
1725        } else {
1726            unsigned attrib_type = get_format_type(it_a->second->format);
1727            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1728
1729            /* type checking */
1730            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1731                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1732                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1733                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1734                            string_VkFormat(it_a->second->format), a_first,
1735                            describe_type(vs, it_b->second.type_id).c_str())) {
1736                    pass = false;
1737                }
1738            }
1739
1740            /* OK! */
1741            it_a++;
1742            it_b++;
1743        }
1744    }
1745
1746    return pass;
1747}
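
// Worked example (hypothetical): an attribute with format
// VK_FORMAT_R8G8B8A8_UINT characterizes as FORMAT_TYPE_UINT, so a vertex
// shader input `layout(location = 0) in vec4 pos;` (FORMAT_TYPE_FLOAT) at the
// same location triggers the SHADER_CHECKER_INTERFACE_TYPE_MISMATCH error
// above, while `in uvec4 pos;` would match.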
1748
1749static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1750                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1751    std::map<location_t, interface_var> outputs;
1752    std::map<uint32_t, VkFormat> color_attachments;
1753    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1754        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1755            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1756        }
1757    }
1758
1759    bool pass = true;
1760
1761    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1762
1763    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);
1764
1765    auto it_a = outputs.begin();
1766    auto it_b = color_attachments.begin();
1767
1768    /* Walk attachment list and outputs together */
1769
1770    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1771        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1772        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1773
1774        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1775            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1776                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1777                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1778                pass = false;
1779            }
1780            it_a++;
1781        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1782            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1783                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1784                pass = false;
1785            }
1786            it_b++;
1787        } else {
1788            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1789            unsigned att_type = get_format_type(it_b->second);
1790
1791            /* type checking */
1792            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1793                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1794                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1795                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1796                            string_VkFormat(it_b->second),
1797                            describe_type(fs, it_a->second.type_id).c_str())) {
1798                    pass = false;
1799                }
1800            }
1801
1802            /* OK! */
1803            it_a++;
1804            it_b++;
1805        }
1806    }
1807
1808    return pass;
1809}
1810
1811/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1812 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1813 * for example.
1814 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1815 *  - NOT the shader input/output interfaces.
1816 *
1817 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1818 * converting parts of this to be generated from the machine-readable spec instead.
1819 */
1820static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1821    std::unordered_set<uint32_t> worklist;
1822    worklist.insert(entrypoint.word(2));
1823
1824    while (!worklist.empty()) {
1825        auto id_iter = worklist.begin();
1826        auto id = *id_iter;
1827        worklist.erase(id_iter);
1828
1829        auto insn = src->get_def(id);
1830        if (insn == src->end()) {
1831            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1832             * across all kinds of things here that we may not care about. */
1833            continue;
1834        }
1835
1836        /* try to add to the output set */
1837        if (!ids.insert(id).second) {
1838            continue; /* if we already saw this id, we don't want to walk it again. */
1839        }
1840
1841        switch (insn.opcode()) {
1842        case spv::OpFunction:
1843            /* scan whole body of the function, enlisting anything interesting */
1844            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1845                switch (insn.opcode()) {
1846                case spv::OpLoad:
1847                case spv::OpAtomicLoad:
1848                case spv::OpAtomicExchange:
1849                case spv::OpAtomicCompareExchange:
1850                case spv::OpAtomicCompareExchangeWeak:
1851                case spv::OpAtomicIIncrement:
1852                case spv::OpAtomicIDecrement:
1853                case spv::OpAtomicIAdd:
1854                case spv::OpAtomicISub:
1855                case spv::OpAtomicSMin:
1856                case spv::OpAtomicUMin:
1857                case spv::OpAtomicSMax:
1858                case spv::OpAtomicUMax:
1859                case spv::OpAtomicAnd:
1860                case spv::OpAtomicOr:
1861                case spv::OpAtomicXor:
1862                    worklist.insert(insn.word(3)); /* ptr */
1863                    break;
1864                case spv::OpStore:
1865                case spv::OpAtomicStore:
1866                    worklist.insert(insn.word(1)); /* ptr */
1867                    break;
1868                case spv::OpAccessChain:
1869                case spv::OpInBoundsAccessChain:
1870                    worklist.insert(insn.word(3)); /* base ptr */
1871                    break;
1872                case spv::OpSampledImage:
1873                case spv::OpImageSampleImplicitLod:
1874                case spv::OpImageSampleExplicitLod:
1875                case spv::OpImageSampleDrefImplicitLod:
1876                case spv::OpImageSampleDrefExplicitLod:
1877                case spv::OpImageSampleProjImplicitLod:
1878                case spv::OpImageSampleProjExplicitLod:
1879                case spv::OpImageSampleProjDrefImplicitLod:
1880                case spv::OpImageSampleProjDrefExplicitLod:
1881                case spv::OpImageFetch:
1882                case spv::OpImageGather:
1883                case spv::OpImageDrefGather:
1884                case spv::OpImageRead:
1885                case spv::OpImage:
1886                case spv::OpImageQueryFormat:
1887                case spv::OpImageQueryOrder:
1888                case spv::OpImageQuerySizeLod:
1889                case spv::OpImageQuerySize:
1890                case spv::OpImageQueryLod:
1891                case spv::OpImageQueryLevels:
1892                case spv::OpImageQuerySamples:
1893                case spv::OpImageSparseSampleImplicitLod:
1894                case spv::OpImageSparseSampleExplicitLod:
1895                case spv::OpImageSparseSampleDrefImplicitLod:
1896                case spv::OpImageSparseSampleDrefExplicitLod:
1897                case spv::OpImageSparseSampleProjImplicitLod:
1898                case spv::OpImageSparseSampleProjExplicitLod:
1899                case spv::OpImageSparseSampleProjDrefImplicitLod:
1900                case spv::OpImageSparseSampleProjDrefExplicitLod:
1901                case spv::OpImageSparseFetch:
1902                case spv::OpImageSparseGather:
1903                case spv::OpImageSparseDrefGather:
1904                case spv::OpImageTexelPointer:
1905                    worklist.insert(insn.word(3)); /* image or sampled image */
1906                    break;
1907                case spv::OpImageWrite:
1908                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1909                    break;
1910                case spv::OpFunctionCall:
1911                    for (uint32_t i = 3; i < insn.len(); i++) {
1912                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1913                    }
1914                    break;
1915
1916                case spv::OpExtInst:
1917                    for (uint32_t i = 5; i < insn.len(); i++) {
1918                        worklist.insert(insn.word(i)); /* operands to ext inst */
1919                    }
1920                    break;
1921                }
1922            }
1923            break;
1924        }
1925    }
1926}
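
// Sketch of the effect (hypothetical module): if main() loads a uniform %ubo_a
// through an OpAccessChain while a second uniform %ubo_b is never referenced
// by anything reachable from the entrypoint, the worklist walk above reaches
// %ubo_a via the access chain's base pointer but never sees %ubo_b, so
// consumers of accessible_ids (e.g. collect_interface_by_descriptor_slot)
// ignore %ubo_b entirely.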
1927
1928static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1929                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1930                                                          shader_module const *src, spirv_inst_iter type,
1931                                                          VkShaderStageFlagBits stage) {
1932    bool pass = true;
1933
1934    /* strip off ptrs etc */
1935    type = get_struct_type(src, type, false);
1936    assert(type != src->end());
1937
1938    /* validate directly off the offsets. this isn't quite correct for arrays
1939     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1940     * sizes */
1941    for (auto insn : *src) {
1942        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1943
1944            if (insn.word(3) == spv::DecorationOffset) {
1945                unsigned offset = insn.word(4);
1946                auto size = 4; /* bytes; TODO: calculate this based on the type */
1947
1948                bool found_range = false;
1949                for (auto const &range : *pushConstantRanges) {
1950                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1951                        found_range = true;
1952
1953                        if ((range.stageFlags & stage) == 0) {
1954                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1955                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1956                                        "Push constant range covering variable starting at "
1957                                        "offset %u not accessible from stage %s",
1958                                        offset, string_VkShaderStageFlagBits(stage))) {
1959                                pass = false;
1960                            }
1961                        }
1962
1963                        break;
1964                    }
1965                }
1966
1967                if (!found_range) {
1968                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1969                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1970                                "Push constant range covering variable starting at "
1971                                "offset %u not declared in layout",
1972                                offset)) {
1973                        pass = false;
1974                    }
1975                }
1976            }
1977        }
1978    }
1979
1980    return pass;
1981}
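
// Worked example (hypothetical): with a single range
// { VK_SHADER_STAGE_VERTEX_BIT, offset 0, size 16 }, a push constant member
// decorated Offset 16 is covered by no range and reports
// SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, while a member at Offset 0
// referenced from a fragment-stage module falls inside the range but fails
// the stageFlags test and reports
// SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE.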
1982
1983static bool validate_push_constant_usage(debug_report_data *report_data,
1984                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
1985                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
1986    bool pass = true;
1987
1988    for (auto id : accessible_ids) {
1989        auto def_insn = src->get_def(id);
1990        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1991            pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src,
1992                                                                 src->get_def(def_insn.word(1)), stage);
1993        }
1994    }
1995
1996    return pass;
1997}
1998
1999// For given pipelineLayout verify that the set_layout_node at slot.first
2000//  has the requested binding at slot.second and return ptr to that binding
2001static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2002
2003    if (!pipelineLayout)
2004        return nullptr;
2005
2006    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
2007        return nullptr;
2008
2009    return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2010}
2011
2012// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2013
2014static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2015
// TODO : Should track lastBound per commandBuffer and, when draws occur, report based on that cmd buffer's lastBound.
//   Then need to synchronize accesses per cmd buffer so that while one thread reads state on a cmd buffer, updates
//   to that same cmd buffer from a separate thread are not changing state from underneath us.
2019// Track the last cmd buffer touched by this thread
2020
2021static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2022    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2023        if (pCB->drawCount[i])
2024            return true;
2025    }
2026    return false;
2027}
2028
2029// Check object status for selected flag state
2030static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2031                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2032    if (!(pNode->status & status_mask)) {
2033        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2034                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2035                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2036    }
2037    return false;
2038}
2039
2040// Retrieve pipeline node ptr for given pipeline object
2041static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2042    auto it = my_data->pipelineMap.find(pipeline);
2043    if (it == my_data->pipelineMap.end()) {
2044        return nullptr;
2045    }
2046    return it->second;
2047}
2048
2049static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2050    auto it = my_data->renderPassMap.find(renderpass);
2051    if (it == my_data->renderPassMap.end()) {
2052        return nullptr;
2053    }
2054    return it->second;
2055}
2056
2057static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
2058    auto it = my_data->frameBufferMap.find(framebuffer);
2059    if (it == my_data->frameBufferMap.end()) {
2060        return nullptr;
2061    }
2062    return it->second.get();
2063}
2064
2065cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2066    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2067    if (it == my_data->descriptorSetLayoutMap.end()) {
2068        return nullptr;
2069    }
2070    return it->second;
2071}
2072
2073static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2074    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2075    if (it == my_data->pipelineLayoutMap.end()) {
2076        return nullptr;
2077    }
2078    return &it->second;
2079}
2080
2081// Return true if for a given PSO, the given state enum is dynamic, else return false
2082static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2083    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2084        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2085            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2086                return true;
2087        }
2088    }
2089    return false;
2090}
2091
2092// Validate state stored as flags at time of draw call
2093static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2094    bool result;
2095    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2096                             "Dynamic viewport state not set for this command buffer");
2097    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2098                              "Dynamic scissor state not set for this command buffer");
2099    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2100        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2101         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2102        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2103                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2104    }
2105    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2106        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2107        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2108                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2109    }
2110    if (pPipe->blendConstantsEnabled) {
2111        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2112                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2113    }
2114    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2115        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2116        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2117                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2118    }
2119    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2120        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2121        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2122                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2123        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2124                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2125        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2126                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2127    }
2128    if (indexedDraw) {
2129        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2130                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2131                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2132    }
2133    return result;
2134}
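
// Likely trigger (a sketch, assuming CBSTATUS_VIEWPORT_SET is set by
// vkCmdSetViewport): binding a pipeline that lists VK_DYNAMIC_STATE_VIEWPORT
// in pDynamicStates and recording a draw without a prior vkCmdSetViewport
// leaves CBSTATUS_VIEWPORT_SET clear, producing DRAWSTATE_VIEWPORT_NOT_BOUND.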
2135
2136// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
2140//  If not, they are not compatible.
2141static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2142                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2143                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2144                                             const VkAttachmentDescription *pSecondaryAttachments) {
2145    // Check potential NULL cases first to avoid nullptr issues later
2146    if (pPrimary == nullptr) {
2147        if (pSecondary == nullptr) {
2148            return true;
2149        }
2150        return false;
2151    } else if (pSecondary == nullptr) {
2152        return false;
2153    }
2154    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2155        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2156            return true;
2157    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2158        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2159            return true;
2160    } else { // Format and sample count must match
2161        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2162            return true;
2163        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2164            return false;
2165        }
2166        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2167             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2168            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2169             pSecondaryAttachments[pSecondary[index].attachment].samples))
2170            return true;
2171    }
2172    // Format and sample counts didn't match
2173    return false;
2174}
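
// Worked example (hypothetical): with index 1, a primary subpass using two
// color references and a secondary subpass using only one, the secondary is
// treated as if reference 1 were VK_ATTACHMENT_UNUSED; compatibility then
// requires the primary's reference 1 to be VK_ATTACHMENT_UNUSED as well.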
2175
2176// For given primary and secondary RenderPass objects, verify that they're compatible
2177static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2178                                            string &errorMsg) {
2179    auto primary_render_pass = getRenderPass(my_data, primaryRP);
2180    auto secondary_render_pass = getRenderPass(my_data, secondaryRP);
2181
2182    if (!primary_render_pass) {
2183        stringstream errorStr;
2184        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2185        errorMsg = errorStr.str();
2186        return false;
2187    }
2188
2189    if (!secondary_render_pass) {
2190        stringstream errorStr;
2191        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2192        errorMsg = errorStr.str();
2193        return false;
2194    }
2195    // Trivial pass case is exact same RP
2196    if (primaryRP == secondaryRP) {
2197        return true;
2198    }
2199    const VkRenderPassCreateInfo *primaryRPCI = primary_render_pass->pCreateInfo;
2200    const VkRenderPassCreateInfo *secondaryRPCI = secondary_render_pass->pCreateInfo;
2201    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2202        stringstream errorStr;
2203        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2204                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2205        errorMsg = errorStr.str();
2206        return false;
2207    }
2208    uint32_t spIndex = 0;
2209    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2210        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2211        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2212        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2213        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2214        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2215            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2216                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2217                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2218                stringstream errorStr;
2219                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2220                errorMsg = errorStr.str();
2221                return false;
2222            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2223                                                         primaryColorCount, primaryRPCI->pAttachments,
2224                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2225                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2226                stringstream errorStr;
2227                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2228                errorMsg = errorStr.str();
2229                return false;
2230            }
2231        }
2232
2233        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2234                                              1, primaryRPCI->pAttachments,
2235                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2236                                              1, secondaryRPCI->pAttachments)) {
2237            stringstream errorStr;
2238            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2239            errorMsg = errorStr.str();
2240            return false;
2241        }
2242
2243        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2244        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2245        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2246        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2250                stringstream errorStr;
2251                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2252                errorMsg = errorStr.str();
2253                return false;
2254            }
2255        }
2256    }
2257    return true;
2258}
2259
2260// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2261// pipelineLayout[layoutIndex]
2262static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2263                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
2264    auto pipeline_layout = getPipelineLayout(my_data, layout);
2265    if (!pipeline_layout) {
2266        stringstream errorStr;
2267        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2268        errorMsg = errorStr.str();
2269        return false;
2270    }
2271    if (layoutIndex >= pipeline_layout->descriptorSetLayouts.size()) {
2272        stringstream errorStr;
2273        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout->descriptorSetLayouts.size()
2274                 << " setLayouts corresponding to sets 0-" << pipeline_layout->descriptorSetLayouts.size() - 1
2275                 << ", but you're attempting to bind set to index " << layoutIndex;
2276        errorMsg = errorStr.str();
2277        return false;
2278    }
2279    auto layout_node = pipeline_layout->setLayouts[layoutIndex];
2280    return pSet->IsCompatible(layout_node, &errorMsg);
2281}
2282
2283// Validate that data for each specialization entry is fully contained within the buffer.
2284static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2285    bool pass = true;
2286
2287    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2288
2289    if (spec) {
2290        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2291            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2292                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2293                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2294                            "Specialization entry %u (for constant id %u) references memory outside provided "
2295                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2296                            " bytes provided)",
2297                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2298                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2299
2300                    pass = false;
2301                }
2302            }
2303        }
2304    }
2305
2306    return pass;
2307}
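
// Example failure (hypothetical): with dataSize = 12, a map entry
// { constantID 0, offset 8, size 8 } ends at byte 16, past the 12 bytes
// provided, so entry 0 reports SHADER_CHECKER_BAD_SPECIALIZATION; an entry of
// size 4 at offset 8 would fit exactly and pass.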
2308
2309static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2310                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2311    auto type = module->get_def(type_id);
2312
2313    descriptor_count = 1;
2314
2315    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2316     * descriptor count for each dimension. */
2317    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2318        if (type.opcode() == spv::OpTypeArray) {
2319            descriptor_count *= get_constant_value(module, type.word(3));
2320            type = module->get_def(type.word(2));
2321        }
2322        else {
2323            type = module->get_def(type.word(3));
2324        }
2325    }
2326
2327    switch (type.opcode()) {
2328    case spv::OpTypeStruct: {
2329        for (auto insn : *module) {
2330            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2331                if (insn.word(2) == spv::DecorationBlock) {
2332                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2333                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2334                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2335                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2336                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2337                }
2338            }
2339        }
2340
2341        /* Invalid */
2342        return false;
2343    }
2344
2345    case spv::OpTypeSampler:
2346        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2347
2348    case spv::OpTypeSampledImage:
2349        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2350            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2351             * doesn't really have a sampler, and a texel buffer descriptor
2352             * doesn't really provide one. Allow this slight mismatch.
2353             */
2354            auto image_type = module->get_def(type.word(2));
2355            auto dim = image_type.word(3);
2356            auto sampled = image_type.word(7);
2357            return dim == spv::DimBuffer && sampled == 1;
2358        }
2359        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2360
2361    case spv::OpTypeImage: {
        /* Many descriptor types can back an image type; which one depends on the
         * dimension and on whether the image will be used with a sampler. SPIR-V
         * for Vulkan requires that Sampled be 1 or 2; leaving the decision to
         * runtime is not allowed.
         */
2367        auto dim = type.word(3);
2368        auto sampled = type.word(7);
2369
2370        if (dim == spv::DimSubpassData) {
2371            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2372        } else if (dim == spv::DimBuffer) {
2373            if (sampled == 1) {
2374                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2375            } else {
2376                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2377            }
2378        } else if (sampled == 1) {
2379            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2380        } else {
2381            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2382        }
2383    }
2384
2385    /* We shouldn't really see any other junk types -- but if we do, they're
2386     * a mismatch.
2387     */
2388    default:
2389        return false; /* Mismatch */
2390    }
2391}
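
// Examples of the mapping above (hypothetical GLSL): `uniform sampler2D t;`
// compiles to an OpTypeSampledImage of a Dim2D image and so requires
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; a `buffer Ssbo { ... } s;` block
// is an OpTypeStruct decorated BufferBlock and accepts STORAGE_BUFFER or
// STORAGE_BUFFER_DYNAMIC; and `uniform sampler2D ts[4];` strips one array
// level and yields descriptor_count = 4.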
2392
2393static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2394    if (!feature) {
2395        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2396                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2397                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2398                    "enabled on the device",
2399                    feature_name)) {
2400            return false;
2401        }
2402    }
2403
2404    return true;
2405}
2406
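// Walk every OpCapability instruction in the module and verify that each optional
// capability maps to a device feature enabled at vkCreateDevice time.
// Illustrative: a module declaring OpCapability Geometry passes only if
// VkPhysicalDeviceFeatures::geometryShader was enabled.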
2407static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2408                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2409    bool pass = true;
2410
2412    for (auto insn : *src) {
2413        if (insn.opcode() == spv::OpCapability) {
2414            switch (insn.word(1)) {
2415            case spv::CapabilityMatrix:
2416            case spv::CapabilityShader:
2417            case spv::CapabilityInputAttachment:
2418            case spv::CapabilitySampled1D:
2419            case spv::CapabilityImage1D:
2420            case spv::CapabilitySampledBuffer:
2421            case spv::CapabilityImageBuffer:
2422            case spv::CapabilityImageQuery:
2423            case spv::CapabilityDerivativeControl:
2424                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2425                break;
2426
2427            case spv::CapabilityGeometry:
2428                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2429                break;
2430
2431            case spv::CapabilityTessellation:
2432                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2433                break;
2434
2435            case spv::CapabilityFloat64:
2436                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2437                break;
2438
2439            case spv::CapabilityInt64:
2440                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2441                break;
2442
2443            case spv::CapabilityTessellationPointSize:
2444            case spv::CapabilityGeometryPointSize:
2445                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2446                                        "shaderTessellationAndGeometryPointSize");
2447                break;
2448
2449            case spv::CapabilityImageGatherExtended:
2450                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2451                break;
2452
2453            case spv::CapabilityStorageImageMultisample:
2454                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2455                break;
2456
2457            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2458                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2459                                        "shaderUniformBufferArrayDynamicIndexing");
2460                break;
2461
2462            case spv::CapabilitySampledImageArrayDynamicIndexing:
2463                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2464                                        "shaderSampledImageArrayDynamicIndexing");
2465                break;
2466
2467            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2468                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2469                                        "shaderStorageBufferArrayDynamicIndexing");
2470                break;
2471
2472            case spv::CapabilityStorageImageArrayDynamicIndexing:
2473                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2474                                        "shaderStorageImageArrayDynamicIndexing");
2475                break;
2476
2477            case spv::CapabilityClipDistance:
2478                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2479                break;
2480
2481            case spv::CapabilityCullDistance:
2482                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2483                break;
2484
2485            case spv::CapabilityImageCubeArray:
2486                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2487                break;
2488
2489            case spv::CapabilitySampleRateShading:
2490                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2491                break;
2492
2493            case spv::CapabilitySparseResidency:
2494                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2495                break;
2496
2497            case spv::CapabilityMinLod:
2498                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2499                break;
2500
2501            case spv::CapabilitySampledCubeArray:
2502                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2503                break;
2504
2505            case spv::CapabilityImageMSArray:
2506                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2507                break;
2508
2509            case spv::CapabilityStorageImageExtendedFormats:
2510                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2511                                        "shaderStorageImageExtendedFormats");
2512                break;
2513
2514            case spv::CapabilityInterpolationFunction:
2515                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2516                break;
2517
2518            case spv::CapabilityStorageImageReadWithoutFormat:
2519                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2520                                        "shaderStorageImageReadWithoutFormat");
2521                break;
2522
2523            case spv::CapabilityStorageImageWriteWithoutFormat:
2524                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2525                                        "shaderStorageImageWriteWithoutFormat");
2526                break;
2527
2528            case spv::CapabilityMultiViewport:
2529                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2530                break;
2531
2532            default:
2533                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2534                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2535                            "Shader declares capability %u, which is not supported in Vulkan.",
2536                            insn.word(1)))
2537                    pass = false;
2538                break;
2539            }
2540        }
2541    }
2542
2543    return pass;
2544}
2545
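// Validate a single shader stage: look up the module, find the entrypoint, check
// declared capabilities against enabled features, collect the descriptor slots the
// entrypoint can actually reach, and cross-check each use against the matching
// pipeline layout binding. Assumes pStage->module is present in shaderModuleMap;
// invalid handles are expected to be caught by other layers.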
2546static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2547                                           VkPipelineShaderStageCreateInfo const *pStage,
2548                                           PIPELINE_NODE *pipeline,
2549                                           shader_module **out_module,
2550                                           spirv_inst_iter *out_entrypoint,
2551                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2552                                           std::unordered_map<VkShaderModule,
2553                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2554    bool pass = true;
2555    auto module_it = shaderModuleMap.find(pStage->module);
2556    auto module = *out_module = module_it->second.get();
2557    pass &= validate_specialization_offsets(report_data, pStage);
2558
2559    /* find the entrypoint */
2560    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2561    if (entrypoint == module->end()) {
2562        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2563                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2564                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2565                    string_VkShaderStageFlagBits(pStage->stage))) {
2566            pass = false;
2567        }
2568    }
2569
2570    /* validate shader capabilities against enabled device features */
2571    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2572
2573    /* mark accessible ids */
2574    std::unordered_set<uint32_t> accessible_ids;
2575    mark_accessible_ids(module, entrypoint, accessible_ids);
2576
2577    /* validate descriptor set layout against what the entrypoint actually uses */
2578    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2579    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
2580
2581    auto pipelineLayout = pipeline->pipelineLayout;
2582
2583    /* validate push constant usage */
2584    pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges,
2585                                        module, accessible_ids, pStage->stage);
2586
2587    /* validate descriptor use */
2588    for (auto use : descriptor_uses) {
2589        // While validating shaders capture which slots are used by the pipeline
2590        pipeline->active_slots[use.first.first].insert(use.first.second);
2591
2592        /* verify given pipelineLayout has requested setLayout with requested binding */
2593        const auto & binding = get_descriptor_binding(pipelineLayout, use.first);
2594        unsigned required_descriptor_count;
2595
2596        if (!binding) {
2597            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2598                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2599                        "Shader uses descriptor slot %u.%u (used as type `%s`) but that slot is not declared in the pipeline layout",
2600                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2601                pass = false;
2602            }
2603        } else if (~binding->stageFlags & pStage->stage) {
2604            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2605                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2606                        "Shader uses descriptor slot %u.%u (used "
2607                        "as type `%s`) but descriptor not "
2608                        "accessible from stage %s",
2609                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2610                        string_VkShaderStageFlagBits(pStage->stage))) {
2611                pass = false;
2612            }
2613        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2614                                          /*out*/ required_descriptor_count)) {
2615            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2616                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2617                                                                       "%u.%u: shader uses type `%s` but the "
2618                                                                       "pipeline layout declares a descriptor of type %s",
2619                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2620                        string_VkDescriptorType(binding->descriptorType))) {
2621                pass = false;
2622            }
2623        } else if (binding->descriptorCount < required_descriptor_count) {
2624            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2625                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2626                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2627                        required_descriptor_count, use.first.first, use.first.second,
2628                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2629                pass = false;
2630            }
2631        }
2632    }
2633
2634    return pass;
2635}
2636
2638// Validate the shaders used by the given pipeline and record the set/binding slots
2639//  actually used by those shaders into pPipeline->active_slots
2640static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2641                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2642                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2643    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2644    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2645    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2646
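    // One slot per graphics stage (vertex, tess control, tess eval, geometry,
    // fragment), indexed via get_shader_stage_id().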
2647    shader_module *shaders[5];
2648    memset(shaders, 0, sizeof(shaders));
2649    spirv_inst_iter entrypoints[5];
2650    memset(entrypoints, 0, sizeof(entrypoints));
2651    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2652    bool pass = true;
2653
2654    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2655        auto pStage = &pCreateInfo->pStages[i];
2656        auto stage_id = get_shader_stage_id(pStage->stage);
2657        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2658                                               &shaders[stage_id], &entrypoints[stage_id],
2659                                               enabledFeatures, shaderModuleMap);
2660    }
2661
2662    vi = pCreateInfo->pVertexInputState;
2663
2664    if (vi) {
2665        pass &= validate_vi_consistency(report_data, vi);
2666    }
2667
2668    if (shaders[vertex_stage]) {
2669        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2670    }
2671
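    // Match each stage's outputs against the inputs of the next present stage,
    // walking from the vertex stage toward the fragment stage and skipping
    // stages that are absent.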
2672    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2673    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2674
2675    while (!shaders[producer] && producer != fragment_stage) {
2676        producer++;
2677        consumer++;
2678    }
2679
2680    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2681        assert(shaders[producer]);
2682        if (shaders[consumer]) {
2683            pass &= validate_interface_between_stages(report_data,
2684                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2685                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2686
2687            producer = consumer;
2688        }
2689    }
2690
2691    if (shaders[fragment_stage] && pPipeline->renderPass) {
2692        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2693                                                        pPipeline->renderPass, pCreateInfo->subpass);
2694    }
2695
2696    return pass;
2697}
2698
2699static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2700                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2701    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2702
2703    shader_module *module;
2704    spirv_inst_iter entrypoint;
2705
2706    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2707                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2708}
2709// Return Set node ptr for specified set or else NULL
2710cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2711    auto set_it = my_data->setMap.find(set);
2712    if (set_it == my_data->setMap.end()) {
2713        return NULL;
2714    }
2715    return set_it->second;
2716}
2717// For the given command buffer, verify and update the state for activeSetBindingsPairs
2718//  This includes:
2719//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2720//     To be valid, the dynamic offset combined with the offset and range from its
2721//     descriptor update must not overflow the size of its buffer being updated
2722//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2723//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
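//  Illustrative for (1): a dynamic uniform buffer descriptor updated with offset=64
//  and range=128 against a 256-byte buffer tolerates a dynamic offset of 64
//  (64+64+128 == 256) but not 128 (320 > 256).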
2724static bool validate_and_update_drawtime_descriptor_state(
2725    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2726    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>,
2727                            std::vector<uint32_t> const *>> &activeSetBindingsPairs) {
2728    bool result = false;
2729    for (auto set_bindings_pair : activeSetBindingsPairs) {
2730        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2731        std::string err_str;
2732        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2733                                         &err_str)) {
2734            // Report error here
2735            auto set = set_node->GetSet();
2736            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2737                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2738                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s",
2739                              reinterpret_cast<const uint64_t &>(set), err_str.c_str());
2740        }
2741        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2742    }
2743    return result;
2744}
2745
2746// For given pipeline, return number of MSAA samples, or one if MSAA disabled
2747static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2748    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2749        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2750        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2751    }
2752    return VK_SAMPLE_COUNT_1_BIT;
2753}
2754
2755// Validate draw-time state related to the PSO
2756static bool validatePipelineDrawtimeState(layer_data const *my_data,
2757                                          LAST_BOUND_STATE const &state,
2758                                          const GLOBAL_CB_NODE *pCB,
2759                                          PIPELINE_NODE const *pPipeline) {
2760    bool skip_call = false;
2761
2762    // Verify Vtx binding
2763    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2764        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2765            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2766                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2767                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2768                                  "The Pipeline State Object (0x%" PRIxLEAST64
2769                                  ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
2770                                  " should be set via vkCmdBindVertexBuffers.",
2771                                  (uint64_t)state.pipeline, i);
2772            }
2773        }
2774    } else {
2775        if (!pCB->currentDrawData.buffers.empty()) {
2776            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2777                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2778                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2779                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2780                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2781        }
2782    }
2783    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2784    // Skip check if rasterization is disabled or there is no viewport.
2785    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2786         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2787        pPipeline->graphicsPipelineCI.pViewportState) {
2788        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2789        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2790        if (dynViewport) {
2791            if (pCB->viewports.size() != pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
2792                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2793                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2794                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2795                                  ", but PSO viewportCount is %u. These counts must match.",
2796                                  pCB->viewports.size(), pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
2797            }
2798        }
2799        if (dynScissor) {
2800            if (pCB->scissors.size() != pPipeline->graphicsPipelineCI.pViewportState->scissorCount) {
2801                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2802                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2803                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2804                                  ", but PSO scissorCount is %u. These counts must match.",
2805                                  pCB->scissors.size(), pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
2806            }
2807        }
2808    }
2809
2810    // Verify that any MSAA request in PSO matches sample# in bound FB
2811    // Skip the check if rasterization is disabled.
2812    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2813        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2814        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2815        if (pCB->activeRenderPass) {
2816            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
2817            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2818            VkSampleCountFlagBits subpass_num_samples = VkSampleCountFlagBits(0);
2819            uint32_t i;
2820
2821            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2822            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2823                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2824                skip_call |=
2825                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2826                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2827                                "Render pass subpass %u mismatch with blending state defined and blend state attachment "
2828                                "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
2829                                "must be the same at draw-time.",
2830                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2831                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2832            }
2833
2834            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2835                VkSampleCountFlagBits samples;
2836
2837                if (subpass_desc->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
2838                    continue;
2839
2840                samples = render_pass_info->pAttachments[subpass_desc->pColorAttachments[i].attachment].samples;
2841                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0)) {
2842                    subpass_num_samples = samples;
2843                } else if (subpass_num_samples != samples) {
2844                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2845                    break;
2846                }
2847            }
2848            if ((subpass_desc->pDepthStencilAttachment != NULL) &&
2849                (subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
2850                const VkSampleCountFlagBits samples =
2851                        render_pass_info->pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples;
2852                if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0))
2853                    subpass_num_samples = samples;
2854                else if (subpass_num_samples != samples)
2855                    subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2856            }
2857
2858            if (((subpass_desc->colorAttachmentCount > 0) || (subpass_desc->pDepthStencilAttachment != NULL)) &&
2859                (pso_num_samples != subpass_num_samples)) {
2860                skip_call |=
2861                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2862                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2863                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
2864                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
2865                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2866                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2867            }
2868        } else {
2869            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2870                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2871                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2872                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2873        }
2874    }
2875    // Verify that PSO creation renderPass is compatible with active renderPass
2876    if (pCB->activeRenderPass) {
2877        std::string err_string;
2878        if (!verify_renderpass_compatibility(my_data, pCB->activeRenderPass->renderPass, pPipeline->graphicsPipelineCI.renderPass,
2879                                             err_string)) {
2880            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
2881            skip_call |=
2882                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2883                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2884                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
2885                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
2886                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
2887                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
2888        }
2889    }
2890    // TODO : Add more checks here
2891
2892    return skip_call;
2893}
2894
2895// Validate overall state at the time of a draw call
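//  Resolves the bound pipeline, checks draw-state flags, verifies that each set the
//  pipeline's shaders use is bound, layout-compatible, and updated, validates any
//  dynamic offsets, then runs the PSO draw-time checks above.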
2896static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2897                                           const VkPipelineBindPoint bindPoint) {
2898    bool result = false;
2899    auto const &state = pCB->lastBound[bindPoint];
2900    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2901    if (nullptr == pPipe) {
2902        result |= log_msg(
2903            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2904            DRAWSTATE_INVALID_PIPELINE, "DS",
2905            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2906        // Early return: every check below dereferences pPipe, so bail out even
2907        // if the message above was filtered and never actually reported.
2908        return result;
2909    }
2910    // First check flag states
2911    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2912        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2913
2914    // Now complete other state checks
2915    if (state.pipelineLayout) {
2916        string errorString;
2917        auto pipelineLayout = (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) ? pPipe->graphicsPipelineCI.layout : pPipe->computePipelineCI.layout;
2918
2919        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2920        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
2921        for (auto & setBindingPair : pPipe->active_slots) {
2922            uint32_t setIndex = setBindingPair.first;
2923            // If valid set is not bound throw an error
2924            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2925                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2926                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2927                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
2928                                  setIndex);
2929            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex],
2930                                                        pipelineLayout, setIndex, errorString)) {
2931                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2932                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2933                result |=
2934                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2935                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2936                            "VkDescriptorSet (0x%" PRIxLEAST64
2937                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2938                            (uint64_t)setHandle, setIndex, (uint64_t)pipelineLayout, errorString.c_str());
2939            } else { // Valid set is bound and layout compatible, validate that it's updated
2940                // Pull the set node
2941                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
2942                // Save vector of all active sets to verify dynamicOffsets below
2943                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
2944                                                                 &state.dynamicOffsets[setIndex]));
2945                // Make sure set has been updated if it has no immutable samplers
2946                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2947                if (!pSet->IsUpdated()) {
2948                    for (auto binding : setBindingPair.second) {
2949                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
2950                            result |= log_msg(
2951                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2952                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2953                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2954                                "this will result in undefined behavior.",
2955                                (uint64_t)pSet->GetSet());
2956                        }
2957                    }
2958                }
2959            }
2960        }
2961        // For given active slots, verify any dynamic descriptors and record updated images & buffers
2962        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2963    }
2964
2965    // Check general pipeline state that needs to be validated at drawtime
2966    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2967        result |= validatePipelineDrawtimeState(my_data, state, pCB, pPipe);
2968
2969    return result;
2970}
2971
2972// Validate HW line width capabilities prior to setting requested line width.
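// Illustrative: with the wideLines feature disabled only lineWidth == 1.0f is
// accepted; with it enabled, the width must still fall within
// limits.lineWidthRange[0..1].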
2973static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
2974    bool skip_call = false;
2975
2976    // First check to see if the physical device supports wide lines.
2977    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
2978        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
2979                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
2980                                            "not supported/enabled so lineWidth must be 1.0f!",
2981                             lineWidth);
2982    } else {
2983        // Otherwise, make sure the width falls in the valid range.
2984        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
2985            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
2986            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
2987                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
2988                                                          "to between [%f, %f]!",
2989                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
2990                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
2991        }
2992    }
2993
2994    return skip_call;
2995}
2996
2997// Verify that create state for a pipeline is valid
2998static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
2999                                      int pipelineIndex) {
3000    bool skipCall = false;
3001
3002    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3003
3004    // If create derivative bit is set, check that we've specified a base
3005    // pipeline correctly, and that the base pipeline was created to allow
3006    // derivatives.
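    // Illustrative: a derivative createInfo must set exactly one of
    // basePipelineHandle (otherwise VK_NULL_HANDLE) or basePipelineIndex
    // (otherwise -1), and the base pipeline must have been created with
    // VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT.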
3007    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3008        PIPELINE_NODE *pBasePipeline = nullptr;
3009        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3010              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3011            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3012                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3013                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3014        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3015            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3016                skipCall |=
3017                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3018                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3019                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3020            } else {
3021                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3022            }
3023        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3024            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3025        }
3026
3027        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3028            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3029                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3030                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3031        }
3032    }
3033
3034    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3035        if (!my_data->phys_dev_properties.features.independentBlend) {
3036            if (pPipeline->attachments.size() > 1) {
3037                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3038                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3039                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3040                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3041                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3042                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3043                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3044                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3045                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3046                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3047                        skipCall |=
3048                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3049                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If the independentBlend feature is not "
3050                            "enabled, all elements of pAttachments must be identical");
3051                    }
3052                }
3053            }
3054        }
3055        if (!my_data->phys_dev_properties.features.logicOp &&
3056            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3057            skipCall |=
3058                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3059                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3060                        "Invalid Pipeline CreateInfo: If the logicOp feature is not enabled, logicOpEnable must be VK_FALSE");
3061        }
3062        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3063            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3064             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3065            skipCall |=
3066                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3067                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3068                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3069        }
3070    }
3071
3072    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3073    // produces nonsense errors that confuse users. Other layers should already
3074    // emit errors for renderpass being invalid.
3075    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3076    if (renderPass &&
3077        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
3078        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3079                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3080                                                                           "is out of range for this renderpass (0..%u)",
3081                            pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
3082    }
3083
3084    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
3085                                                    my_data->shaderModuleMap)) {
3086        skipCall = true;
3087    }
3088    // Each shader's stage must be unique
3089    if (pPipeline->duplicate_shaders) {
3090        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3091            if (pPipeline->duplicate_shaders & stage) {
3092                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3093                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3094                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3095                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3096            }
3097        }
3098    }
3099    // VS is required
3100    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3101        skipCall |=
3102            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3103                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3104    }
3105    // Either both or neither TC/TE shaders should be defined
3106    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3107        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3108        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3109                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3110                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3111    }
3112    // Compute shaders should be specified independent of Gfx shaders
3113    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3114        (pPipeline->active_shaders &
3115         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3116          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3117        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3118                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3119                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3120    }
3121    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3122    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3123    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3124        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3125         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3126        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3127                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3128                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3129                                                                           "topology for tessellation pipelines");
3130    }
3131    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3132        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3133        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3134            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3135                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3136                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3137                                                                               "topology is only valid for tessellation pipelines");
3138        }
3139        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3140            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3141                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3142                                "Invalid Pipeline CreateInfo State: "
3143                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3144                                "topology used. pTessellationState must not be NULL in this case.");
3145        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3146                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3147            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3148                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3149                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3150                                                                               "topology used with patchControlPoints value %u."
3151                                                                               " patchControlPoints should be >0 and <=32.",
3152                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3153        }
3154    }
3155    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3156    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3157        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3158            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3159                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3160        }
3161    }
3162    // Viewport state must be included if rasterization is enabled.
3163    // If the viewport state is included, the viewport and scissor counts should always match.
3164    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3165    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3166        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3167        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3168            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3169                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3170                                                                           "and scissors are dynamic PSO must include "
3171                                                                           "viewportCount and scissorCount in pViewportState.");
3172        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3173                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3174            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3175                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3176                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3177                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3178                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3179        } else {
3180            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3181            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3182            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3183            if (!dynViewport) {
3184                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3185                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3186                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3187                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3188                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3189                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3190                                        "vkCmdSetViewport().",
3191                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3192                }
3193            }
3194            if (!dynScissor) {
3195                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3196                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3197                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3198                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3199                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3200                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3201                                        "vkCmdSetScissor().",
3202                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3203                }
3204            }
3205        }
3206    }
3207    return skipCall;
3208}
3209
3210// Free the Pipeline nodes
3211static void deletePipelines(layer_data *my_data) {
3212    if (my_data->pipelineMap.empty())
3213        return;
3214    for (auto &pipe_map_pair : my_data->pipelineMap) {
3215        delete pipe_map_pair.second;
3216    }
3217    my_data->pipelineMap.clear();
3218}
3219
3220// Block of code at start here specifically for managing/tracking DSs
3221
3222// Return Pool node ptr for specified pool or else NULL
3223DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3224    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3225    if (pool_it == dev_data->descriptorPoolMap.end()) {
3226        return NULL;
3227    }
3228    return pool_it->second;
3229}
3230
3231// Return false if update struct is of valid type, otherwise flag error and return code from callback
3232static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3233    switch (pUpdateStruct->sType) {
3234    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3235    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3236        return false;
3237    default:
3238        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3239                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3240                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3241                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3242    }
3243}
3244
3245// Return the descriptor count for the given update struct (0 for unrecognized struct types)
3246static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3247    switch (pUpdateStruct->sType) {
3248    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3249        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3250    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3251        // TODO : Need to understand this case better and make sure code is correct
3252        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3253    default:
3254        return 0;
3255    }
3256}
3257
3258// For given layout and update, return the first overall index of the layout that is updated
3259static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3260                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3261    return binding_start_index + arrayIndex;
3262}
3263// For given layout and update, return the last overall index of the layout that is updated
3264static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3265                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3266    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3267    return binding_start_index + arrayIndex + count - 1;
3268}
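// Illustrative: a write with descriptorCount=3 starting at arrayIndex=1 into a
// binding whose first overall index is 5 updates indices [6, 8].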
3269// Verify that the descriptor type in the update struct matches what's expected by the layout
3270static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3271                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3272    // First get actual type of update
3273    bool skipCall = false;
3274    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3275    switch (pUpdateStruct->sType) {
3276    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3277        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3278        break;
3279    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3280        /* no need to validate */
3281        return false;
3283    default:
3284        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3285                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3286                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3287                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3288    }
3289    if (!skipCall) {
3290        if (layout_type != actualType) {
3291            skipCall |= log_msg(
3292                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3293                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3294                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3295                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3296        }
3297    }
3298    return skipCall;
3299}
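// Example of the mismatch caught above (hypothetical values): a layout binding of
// type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER that overlaps a VkWriteDescriptorSet with
// descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER triggers
// DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH; copy updates return early because their
// descriptor types are taken from existing sets.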
3300// TODO : Consolidate functions
3301bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3302    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3303    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3304        return false;
3305    }
3306    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3307    imgpair.subresource.aspectMask = aspectMask;
3308    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3309    if (imgsubIt == pCB->imageLayoutMap.end()) {
3310        return false;
3311    }
3312    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3313        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3314                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3315                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3316                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3317    }
3318    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3319        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3320                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3321                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3322                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3323    }
3324    node = imgsubIt->second;
3325    return true;
3326}
3327
3328bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3329    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3330        return false;
3331    }
3332    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3333    imgpair.subresource.aspectMask = aspectMask;
3334    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3335    if (imgsubIt == my_data->imageLayoutMap.end()) {
3336        return false;
3337    }
3338    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3339        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3340                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3341                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3342                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3343    }
3344    layout = imgsubIt->second.layout;
3345    return true;
3346}
3347
3348// find layout(s) on the cmd buf level
3349bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3350    ImageSubresourcePair imgpair = {image, true, range};
3351    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3352    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3353    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3354    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3355    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3356    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3357        imgpair = {image, false, VkImageSubresource()};
3358        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3359        if (imgsubIt == pCB->imageLayoutMap.end())
3360            return false;
3361        node = imgsubIt->second;
3362    }
3363    return true;
3364}
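// Illustrative query (assuming a combined depth/stencil image): a lookup with
// aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) is split
// into the per-aspect calls above. If the depth aspect was last recorded as
// VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL while the stencil aspect is
// still VK_IMAGE_LAYOUT_UNDEFINED, the second lookup finds a conflicting layout
// and the DRAWSTATE_INVALID_LAYOUT message in FindLayout() fires.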
3365
3366// find layout(s) on the global level
3367bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3368    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3369    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3370    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3371    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3372    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3373    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3374        imgpair = {imgpair.image, false, VkImageSubresource()};
3375        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3376        if (imgsubIt == my_data->imageLayoutMap.end())
3377            return false;
3378        layout = imgsubIt->second.layout;
3379    }
3380    return true;
3381}
3382
3383bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3384    ImageSubresourcePair imgpair = {image, true, range};
3385    return FindLayout(my_data, imgpair, layout);
3386}
3387
3388bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3389    auto sub_data = my_data->imageSubresourceMap.find(image);
3390    if (sub_data == my_data->imageSubresourceMap.end())
3391        return false;
3392    auto img_node = getImageNode(my_data, image);
3393    if (!img_node)
3394        return false;
3395    bool ignoreGlobal = false;
3396    // TODO: Make this robust for more than one aspect mask. For now it simply
3397    // ignores potential errors in that case.
3398    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3399        ignoreGlobal = true;
3400    }
3401    for (auto imgsubpair : sub_data->second) {
3402        if (ignoreGlobal && !imgsubpair.hasSubresource)
3403            continue;
3404        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3405        if (img_data != my_data->imageLayoutMap.end()) {
3406            layouts.push_back(img_data->second.layout);
3407        }
3408    }
3409    return true;
3410}
3411
3412// Set the layout on the global level
3413void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3414    VkImage &image = imgpair.image;
3415    // TODO (mlentine): Maybe set format if new? Not used atm.
3416    my_data->imageLayoutMap[imgpair].layout = layout;
3417    // TODO (mlentine): Maybe make vector a set?
3418    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3419    if (subresource == my_data->imageSubresourceMap[image].end()) {
3420        my_data->imageSubresourceMap[image].push_back(imgpair);
3421    }
3422}
3423
3424// Set the layout on the cmdbuf level
3425void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3426    pCB->imageLayoutMap[imgpair] = node;
3427    // TODO (mlentine): Maybe make vector a set?
3428    auto subresource =
3429        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3430    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3431        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3432    }
3433}
3434
3435void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3436    // TODO (mlentine): Maybe make vector a set?
3437    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3438        pCB->imageSubresourceMap[imgpair.image].end()) {
3439        pCB->imageLayoutMap[imgpair].layout = layout;
3440    } else {
3441        // TODO (mlentine): Could be expensive and might need to be removed.
3442        assert(imgpair.hasSubresource);
3443        IMAGE_CMD_BUF_LAYOUT_NODE node;
3444        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3445            node.initialLayout = layout;
3446        }
3447        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3448    }
3449}
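// Sketch of the first-use bookkeeping above (hypothetical call sequence): the
// first SetLayout() for a subresource this command buffer has not touched records
// the new layout as both the initial and the current layout:
//   SetLayout(pCB, imgpair, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
//     // -> pCB->imageLayoutMap[imgpair] == {TRANSFER_DST_OPTIMAL, TRANSFER_DST_OPTIMAL}
// Later calls for the same pair only overwrite the current layout, so
// initialLayout keeps the value that ValidateCmdBufImageLayouts() compares against
// the global layout at submit time.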
3450
3451template <class OBJECT, class LAYOUT>
3452void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3453    if (imgpair.subresource.aspectMask & aspectMask) {
3454        imgpair.subresource.aspectMask = aspectMask;
3455        SetLayout(pObject, imgpair, layout);
3456    }
3457}
3458
3459template <class OBJECT, class LAYOUT>
3460void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3461    ImageSubresourcePair imgpair = {image, true, range};
3462    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3463    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3464    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3465    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3466}
3467
3468template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3469    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3470    SetLayout(pObject, imgpair, layout);  // Whole-image pair (hasSubresource == false); no overload takes (object, image, pair, layout)
3471}
3472
3473void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3474    auto iv_data = getImageViewData(dev_data, imageView);
3475    assert(iv_data);
3476    const VkImage &image = iv_data->image;
3477    const VkImageSubresourceRange &subRange = iv_data->subresourceRange;
3478    // TODO: Do not iterate over every possibility - consolidate where possible
3479    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3480        uint32_t level = subRange.baseMipLevel + j;
3481        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3482            uint32_t layer = subRange.baseArrayLayer + k;
3483            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3484            SetLayout(pCB, image, sub, layout);
3485        }
3486    }
3487}
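// Worked expansion (illustrative): an image view with baseMipLevel=1, levelCount=2,
// baseArrayLayer=0, layerCount=2 produces four per-subresource calls, in this order:
//   SetLayout(pCB, image, {aspectMask, 1, 0}, layout);
//   SetLayout(pCB, image, {aspectMask, 1, 1}, layout);
//   SetLayout(pCB, image, {aspectMask, 2, 0}, layout);
//   SetLayout(pCB, image, {aspectMask, 2, 1}, layout);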
3488
3489// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3490// func_str is the name of the calling function
3491// Return false if no errors occur
3492// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3493static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, const std::string &func_str) {
3494    bool skip_call = false;
3495    auto set_node = my_data->setMap.find(set);
3496    if (set_node == my_data->setMap.end()) {
3497        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3498                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3499                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3500                             (uint64_t)(set));
3501    } else {
3502        if (set_node->second->in_use.load()) {
3503            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3504                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3505                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
3506                                 func_str.c_str(), (uint64_t)(set));
3507        }
3508    }
3509    return skip_call;
3510}
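// Typical call pattern (illustrative sketch; the actual call sites appear later in
// this file): destroy/free paths check that a set is idle before proceeding, e.g.
//   if (validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets"))
//       skip_call = true;  // suppress the call down the chain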
3511
3512// Remove set from setMap and delete the set
3513static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3514    dev_data->setMap.erase(descriptor_set->GetSet());
3515    delete descriptor_set;
3516}
3517// Free all DS Pools including their Sets & related sub-structs
3518// NOTE : Calls to this function should be wrapped in mutex
3519static void deletePools(layer_data *my_data) {
3520    if (my_data->descriptorPoolMap.empty())
3521        return;
3522    for (auto &pool_pair : my_data->descriptorPoolMap) {
3523        // Remove this pool's sets from setMap and delete them
3524        for (auto ds : pool_pair.second->sets) {
3525            freeDescriptorSet(my_data, ds);
3526        }
3527        pool_pair.second->sets.clear();
3528    }
3529    my_data->descriptorPoolMap.clear();
3530}
3531
3532static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3533                                VkDescriptorPoolResetFlags flags) {
3534    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3535    if (!pPool) {
3536        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3537                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
3538                "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
3539    } else {
3540        // TODO: validate flags
3541        // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3542        for (auto ds : pPool->sets) {
3543            freeDescriptorSet(my_data, ds);
3544        }
3545        pPool->sets.clear();
3546        // Reset available count for each type and available sets for this pool
3547        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3548            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3549        }
3550        pPool->availableSets = pPool->maxSets;
3551    }
3552}
3553
3554// For given CB object, fetch associated CB Node from map
3555static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3556    auto it = my_data->commandBufferMap.find(cb);
3557    if (it == my_data->commandBufferMap.end()) {
3558        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3559                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3560                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3561        return NULL;
3562    }
3563    return it->second;
3564}
3565// Free all CB Nodes
3566// NOTE : Calls to this function should be wrapped in mutex
3567static void deleteCommandBuffers(layer_data *my_data) {
3568    if (my_data->commandBufferMap.empty()) {
3569        return;
3570    }
3571    for (auto &cb_pair : my_data->commandBufferMap) {
3572        delete cb_pair.second;
3573    }
3574    my_data->commandBufferMap.clear();
3575}
3576
3577static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3578    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3579                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3580                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3581}
3582
3583bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3584    if (!pCB->activeRenderPass)
3585        return false;
3586    bool skip_call = false;
3587    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3588        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3589        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3590                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3591                             "Commands cannot be called in a subpass using secondary command buffers.");
3592    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3593        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3594                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3595                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3596    }
3597    return skip_call;
3598}
3599
3600static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3601    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3602        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3603                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3604                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3605    return false;
3606}
3607
3608static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3609    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3610        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3611                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3612                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3613    return false;
3614}
3615
3616static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3617    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3618        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3619                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3620                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3621    return false;
3622}
3623
3624// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3625//  in the recording state or if there's an issue with the Cmd ordering
3626static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3627    bool skipCall = false;
3628    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3629    if (pPool) {
3630        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3631        switch (cmd) {
3632        case CMD_BINDPIPELINE:
3633        case CMD_BINDPIPELINEDELTA:
3634        case CMD_BINDDESCRIPTORSETS:
3635        case CMD_FILLBUFFER:
3636        case CMD_CLEARCOLORIMAGE:
3637        case CMD_SETEVENT:
3638        case CMD_RESETEVENT:
3639        case CMD_WAITEVENTS:
3640        case CMD_BEGINQUERY:
3641        case CMD_ENDQUERY:
3642        case CMD_RESETQUERYPOOL:
3643        case CMD_COPYQUERYPOOLRESULTS:
3644        case CMD_WRITETIMESTAMP:
3645            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3646            break;
3647        case CMD_SETVIEWPORTSTATE:
3648        case CMD_SETSCISSORSTATE:
3649        case CMD_SETLINEWIDTHSTATE:
3650        case CMD_SETDEPTHBIASSTATE:
3651        case CMD_SETBLENDSTATE:
3652        case CMD_SETDEPTHBOUNDSSTATE:
3653        case CMD_SETSTENCILREADMASKSTATE:
3654        case CMD_SETSTENCILWRITEMASKSTATE:
3655        case CMD_SETSTENCILREFERENCESTATE:
3656        case CMD_BINDINDEXBUFFER:
3657        case CMD_BINDVERTEXBUFFER:
3658        case CMD_DRAW:
3659        case CMD_DRAWINDEXED:
3660        case CMD_DRAWINDIRECT:
3661        case CMD_DRAWINDEXEDINDIRECT:
3662        case CMD_BLITIMAGE:
3663        case CMD_CLEARATTACHMENTS:
3664        case CMD_CLEARDEPTHSTENCILIMAGE:
3665        case CMD_RESOLVEIMAGE:
3666        case CMD_BEGINRENDERPASS:
3667        case CMD_NEXTSUBPASS:
3668        case CMD_ENDRENDERPASS:
3669            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3670            break;
3671        case CMD_DISPATCH:
3672        case CMD_DISPATCHINDIRECT:
3673            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3674            break;
3675        case CMD_COPYBUFFER:
3676        case CMD_COPYIMAGE:
3677        case CMD_COPYBUFFERTOIMAGE:
3678        case CMD_COPYIMAGETOBUFFER:
3679        case CMD_CLONEIMAGEDATA:
3680        case CMD_UPDATEBUFFER:
3681        case CMD_PIPELINEBARRIER:
3682        case CMD_EXECUTECOMMANDS:
3683        case CMD_END:
3684            break;
3685        default:
3686            break;
3687        }
3688    }
3689    if (pCB->state != CB_RECORDING) {
3690        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3691    } else {
3692        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3693        CMD_NODE cmdNode = {};
3694        // Initialize the cmd node and append it to the end of the CB's command list
3695        cmdNode.cmdNumber = ++pCB->numCmds;
3696        cmdNode.type = cmd;
3697        pCB->cmds.push_back(cmdNode);
3698    }
3699    return skipCall;
3700}
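// Typical usage from a vkCmd* entry point (sketch of the pattern used throughout
// this layer; parameter names illustrative): record-and-validate, then only
// dispatch down the chain when no check requested a skip:
//   skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
//   if (!skipCall)
//       dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);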
3701// Reset the command buffer state
3702//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3703static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3704    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3705    if (pCB) {
3706        pCB->in_use.store(0);
3707        pCB->cmds.clear();
3708        // Reset CB state (note that createInfo is not cleared)
3709        pCB->commandBuffer = cb;
3710        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3711        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3712        pCB->numCmds = 0;
3713        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3714        pCB->state = CB_NEW;
3715        pCB->submitCount = 0;
3716        pCB->status = 0;
3717        pCB->viewports.clear();
3718        pCB->scissors.clear();
3719
3720        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3721            // Before clearing lastBound state, remove this CB's bindings from all uniqueBoundSets
3722            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
3723                set->RemoveBoundCommandBuffer(pCB);
3724            }
3725            pCB->lastBound[i].reset();
3726        }
3727
3728        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3729        pCB->activeRenderPass = nullptr;
3730        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3731        pCB->activeSubpass = 0;
3732        pCB->destroyedSets.clear();
3733        pCB->updatedSets.clear();
3734        pCB->destroyedFramebuffers.clear();
3735        pCB->waitedEvents.clear();
3736        pCB->events.clear();
3737        pCB->writeEventsBeforeWait.clear();
3738        pCB->waitedEventsBeforeQueryReset.clear();
3739        pCB->queryToStateMap.clear();
3740        pCB->activeQueries.clear();
3741        pCB->startedQueries.clear();
3742        pCB->imageSubresourceMap.clear();
3743        pCB->imageLayoutMap.clear();
3744        pCB->eventToStageMap.clear();
3745        pCB->drawData.clear();
3746        pCB->currentDrawData.buffers.clear();
3747        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3748        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3749        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3750            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3751        }
3752        pCB->secondaryCommandBuffers.clear();
3753        pCB->updateImages.clear();
3754        pCB->updateBuffers.clear();
3755        clear_cmd_buf_and_mem_references(dev_data, pCB);
3756        pCB->eventUpdates.clear();
3757        pCB->queryUpdates.clear();
3758
3759        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3760        for (auto framebuffer : pCB->framebuffers) {
3761            auto fbNode = getFramebuffer(dev_data, framebuffer);
3762            if (fbNode)
3763                fbNode->referencingCmdBuffers.erase(pCB->commandBuffer);
3764        }
3765        pCB->framebuffers.clear();
3766        pCB->activeFramebuffer = VK_NULL_HANDLE;
3767    }
3768}
3769
3770// Set PSO-related status bits for CB, including dynamic state set via PSO
3771static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
3772    // Account for any dynamic state not set via this PSO
3773    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3774        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
3775        pCB->status = CBSTATUS_ALL;
3776    } else {
3777        // First consider all state on
3778        // Then unset any state that's noted as dynamic in PSO
3779        // Finally OR that into CB statemask
3780        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
3781        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3782            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3783            case VK_DYNAMIC_STATE_VIEWPORT:
3784                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
3785                break;
3786            case VK_DYNAMIC_STATE_SCISSOR:
3787                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
3788                break;
3789            case VK_DYNAMIC_STATE_LINE_WIDTH:
3790                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3791                break;
3792            case VK_DYNAMIC_STATE_DEPTH_BIAS:
3793                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3794                break;
3795            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3796                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3797                break;
3798            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3799                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3800                break;
3801            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3802                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3803                break;
3804            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3805                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3806                break;
3807            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3808                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3809                break;
3810            default:
3811                // TODO : Flag error here
3812                break;
3813            }
3814        }
3815        pCB->status |= psoDynStateMask;
3816    }
3817}
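// Worked example (illustrative): a pipeline whose pDynamicState lists
// VK_DYNAMIC_STATE_VIEWPORT and VK_DYNAMIC_STATE_SCISSOR produces
//   psoDynStateMask == CBSTATUS_ALL & ~(CBSTATUS_VIEWPORT_SET | CBSTATUS_SCISSOR_SET)
// so binding the pipeline credits the CB with all of its static state, while
// viewport and scissor must still be supplied via vkCmdSetViewport()/
// vkCmdSetScissor() before drawing.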
3818
3819// Print the last bound Gfx Pipeline
3820static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
3821    bool skipCall = false;
3822    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3823    if (pCB) {
3824        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
3825        // Nothing to print if no graphics pipeline is bound
3826        if (pPipeTrav) {
3828            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3829                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
3830                                vk_print_vkgraphicspipelinecreateinfo(
3831                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
3832                                    .c_str());
3833        }
3834    }
3835    return skipCall;
3836}
3837
3838static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
3839    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3840    if (pCB && !pCB->cmds.empty()) {
3841        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3842                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
3843        // Iterate the recorded commands in place rather than copying the vector
3844        for (const auto &cmd_node : pCB->cmds) {
3845            // TODO : Need to pass cb as srcObj here
3846            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
3847                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", cmd_node.cmdNumber, cmdTypeToString(cmd_node.type).c_str());
3848        }
3849    }
3852}
3853
3854static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
3855    bool skipCall = false;
3856    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
3857        return skipCall;
3858    }
3859    skipCall |= printPipeline(my_data, cb);
3860    return skipCall;
3861}
3862
3863// Flags validation error if the associated call is made inside a render pass. The apiName
3864// routine should ONLY be called outside a render pass.
3865static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3866    bool inside = false;
3867    if (pCB->activeRenderPass) {
3868        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3869                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
3870                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
3871                         (uint64_t)pCB->activeRenderPass->renderPass);
3872    }
3873    return inside;
3874}
3875
3876// Flags validation error if the associated call is made outside a render pass. The apiName
3877// routine should ONLY be called inside a render pass.
3878static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3879    bool outside = false;
3880    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3881        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3882         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3883        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3884                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
3885                          "%s: This call must be issued inside an active render pass.", apiName);
3886    }
3887    return outside;
3888}
3889
3890static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3891    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3892}
3895
3896VKAPI_ATTR VkResult VKAPI_CALL
3897CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
3898    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3899
3900    assert(chain_info->u.pLayerInfo);
3901    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3902    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3903    if (fpCreateInstance == NULL)
3904        return VK_ERROR_INITIALIZATION_FAILED;
3905
3906    // Advance the link info for the next element on the chain
3907    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3908
3909    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3910    if (result != VK_SUCCESS)
3911        return result;
3912
3913    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
3914    instance_data->instance = *pInstance;
3915    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
3916    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
3917
3918    instance_data->report_data =
3919        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
3920                                     pCreateInfo->ppEnabledExtensionNames);
3921
3922    init_core_validation(instance_data, pAllocator);
3923
3924    ValidateLayerOrdering(*pCreateInfo);
3925
3926    return result;
3927}
3928
3929/* hook DestroyInstance to remove tableInstanceMap entry */
3930VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3931    // TODOSC : Shouldn't need any customization here
3932    dispatch_key key = get_dispatch_key(instance);
3933    // TBD: Need any locking this early, in case this function is called at the
3934    // same time by more than one thread?
3935    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
3936    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
3937    pTable->DestroyInstance(instance, pAllocator);
3938
3939    std::lock_guard<std::mutex> lock(global_lock);
3940    // Clean up logging callback, if any
3941    while (my_data->logging_callback.size() > 0) {
3942        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
3943        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
3944        my_data->logging_callback.pop_back();
3945    }
3946
3947    layer_debug_report_destroy_instance(my_data->report_data);
3948    delete my_data->instance_dispatch_table;
3949    layer_data_map.erase(key);
3950}
3951
3952static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3953    uint32_t i;
3954    // TBD: Need any locking, in case this function is called at the same time
3955    // by more than one thread?
3956    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
3957    dev_data->device_extensions.wsi_enabled = false;
3958
3959    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
3960    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
3961    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
3962    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
3963    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
3964    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
3965    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
3966
3967    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3968        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
3969            dev_data->device_extensions.wsi_enabled = true;
3970    }
3971}
3972
3973VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3974                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3975    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
3976    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3977
3978    assert(chain_info->u.pLayerInfo);
3979    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3980    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
3981    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
3982    if (fpCreateDevice == NULL) {
3983        return VK_ERROR_INITIALIZATION_FAILED;
3984    }
3985
3986    // Advance the link info for the next element on the chain
3987    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3988
3989    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
3990    if (result != VK_SUCCESS) {
3991        return result;
3992    }
3993
3994    std::unique_lock<std::mutex> lock(global_lock);
3995    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
3996
3997    // Setup device dispatch table
3998    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
3999    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4000    my_device_data->device = *pDevice;
4001
4002    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4003    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4004    // Get physical device limits for this device
4005    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4006    uint32_t count;
4007    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4008    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4009    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4010        gpu, &count, my_device_data->phys_dev_properties.queue_family_properties.data());
4011    // TODO: device limits should make sure these are compatible
4012    if (pCreateInfo->pEnabledFeatures) {
4013        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
4014    } else {
4015        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4016    }
4017    // Store physical device mem limits into device layer_data struct
4018    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4019    lock.unlock();
4020
4021    ValidateLayerOrdering(*pCreateInfo);
4022
4023    return result;
4024}
4025
4026// Forward declaration (definition appears later in this file)
4027static void deleteRenderPasses(layer_data *);
4028VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4029    // TODOSC : Shouldn't need any customization here
4030    dispatch_key key = get_dispatch_key(device);
4031    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4032    // Free all the memory
4033    std::unique_lock<std::mutex> lock(global_lock);
4034    deletePipelines(dev_data);
4035    deleteRenderPasses(dev_data);
4036    deleteCommandBuffers(dev_data);
4037    // This will also delete all sets in the pool & remove them from setMap
4038    deletePools(dev_data);
4039    // All sets should be removed
4040    assert(dev_data->setMap.empty());
4041    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4042        delete del_layout.second;
4043    }
4044    dev_data->descriptorSetLayoutMap.clear();
4045    dev_data->imageViewMap.clear();
4046    dev_data->imageMap.clear();
4047    dev_data->imageSubresourceMap.clear();
4048    dev_data->imageLayoutMap.clear();
4049    dev_data->bufferViewMap.clear();
4050    dev_data->bufferMap.clear();
4051    // Queues persist until device is destroyed
4052    dev_data->queueMap.clear();
4053    lock.unlock();
4054#if MTMERGESOURCE
4055    bool skipCall = false;
4056    lock.lock();
4057    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4058            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4059    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4060            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4061    print_mem_list(dev_data);
4062    printCBList(dev_data);
4063    // Report any memory leaks
4064    DEVICE_MEM_INFO *pInfo = NULL;
4065    if (!dev_data->memObjMap.empty()) {
4066        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4067            pInfo = (*ii).second.get();
4068            if (pInfo->allocInfo.allocationSize != 0) {
4069                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4070                skipCall |=
4071                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4072                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4073                            "MEM", "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4074                                   "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4075                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4076            }
4077        }
4078    }
4079    layer_debug_report_destroy_device(device);
4080    lock.unlock();
4081
4082#if DISPATCH_MAP_DEBUG
4083    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4084#endif
4085    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4086    if (!skipCall) {
4087        pDisp->DestroyDevice(device, pAllocator);
4088    }
4089#else
4090    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4091#endif
4092    delete dev_data->device_dispatch_table;
4093    layer_data_map.erase(key);
4094}
4095
4096static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4097
4098// Validate that the initial layout recorded in the command buffer for each
4099// IMAGE matches that image's current global layout
4101static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4102    bool skip_call = false;
4103    for (auto cb_image_data : pCB->imageLayoutMap) {
4104        VkImageLayout imageLayout;
4105        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4106            skip_call |=
4107                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4108                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4109                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4110        } else {
4111            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4112                // TODO: Set memory invalid which is in mem_tracker currently
4113            } else if (imageLayout != cb_image_data.second.initialLayout) {
4114                if (cb_image_data.first.hasSubresource) {
4115                    skip_call |= log_msg(
4116                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4117                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4118                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4119                        "with layout %s when first use is %s.",
4120                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4121                                cb_image_data.first.subresource.arrayLayer,
4122                                cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4123                        string_VkImageLayout(cb_image_data.second.initialLayout));
4124                } else {
4125                    skip_call |= log_msg(
4126                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4127                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4128                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4129                        "first use is %s.",
4130                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4131                        string_VkImageLayout(cb_image_data.second.initialLayout));
4132                }
4133            }
4134            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4135        }
4136    }
4137    return skip_call;
4138}
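// Example of the submit-time check above (hypothetical): a command buffer whose
// first use of an image expects VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, while
// the global map still records VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL because no
// barrier was submitted, yields DRAWSTATE_INVALID_IMAGE_LAYOUT; a recorded initial
// layout of VK_IMAGE_LAYOUT_UNDEFINED matches anything and passes.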
4139
4140// Track which resources are in-flight by atomically incrementing their "in_use" count
4141static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
4142    bool skip_call = false;
4143    for (auto drawDataElement : pCB->drawData) {
4144        for (auto buffer : drawDataElement.buffers) {
4145            auto buffer_node = getBufferNode(my_data, buffer);
4146            if (!buffer_node) {
4147                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4148                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4149                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4150            } else {
4151                buffer_node->in_use.fetch_add(1);
4152            }
4153        }
4154    }
4155    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4156        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4157            if (!my_data->setMap.count(set->GetSet())) {
4158                skip_call |=
4159                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4160                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4161                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set));
4162            } else {
4163                set->in_use.fetch_add(1);
4164            }
4165        }
4166    }
4167    for (auto semaphore : semaphores) {
4168        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4169        if (semaphoreNode == my_data->semaphoreMap.end()) {
4170            skip_call |=
4171                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4172                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4173                        "Cannot submit cmd buffer using deleted semaphore 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(semaphore));
4174        } else {
4175            semaphoreNode->second.in_use.fetch_add(1);
4176        }
4177    }
4178    for (auto event : pCB->events) {
4179        auto eventNode = my_data->eventMap.find(event);
4180        if (eventNode == my_data->eventMap.end()) {
4181            skip_call |=
4182                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4183                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4184                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
4185        } else {
4186            eventNode->second.in_use.fetch_add(1);
4187        }
4188    }
4189    for (auto event : pCB->writeEventsBeforeWait) {
4190        auto eventNode = my_data->eventMap.find(event);
4191        if (eventNode != my_data->eventMap.end()) eventNode->second.write_in_use++;
4192    }
4193    return skip_call;
4194}
4195
4196// Note: This function assumes that the global lock is held by the calling
4197// thread.
4198static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4199    bool skip_call = false;
4200    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4201    if (pCB) {
4202        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4203            for (auto event : queryEventsPair.second) {
4204                if (my_data->eventMap[event].needsSignaled) {
4205                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4206                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
4207                                         "Cannot get query results on queryPool 0x%" PRIx64
4208                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4209                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4210                }
4211            }
4212        }
4213    }
4214    return skip_call;
4215}
4216// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4217static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4218    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4219    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4220    pCB->in_use.fetch_sub(1);
4221    if (!pCB->in_use.load()) {
4222        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4223    }
4224}
4225
4226static void decrementResources(layer_data *my_data, CB_SUBMISSION *submission) {
4227    GLOBAL_CB_NODE *pCB = getCBNode(my_data, submission->cb);
4228    for (auto drawDataElement : pCB->drawData) {
4229        for (auto buffer : drawDataElement.buffers) {
4230            auto buffer_node = getBufferNode(my_data, buffer);
4231            if (buffer_node) {
4232                buffer_node->in_use.fetch_sub(1);
4233            }
4234        }
4235    }
4236    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4237        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4238            set->in_use.fetch_sub(1);
4239        }
4240    }
4241    for (auto semaphore : submission->semaphores) {
4242        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4243        if (semaphoreNode != my_data->semaphoreMap.end()) {
4244            semaphoreNode->second.in_use.fetch_sub(1);
4245        }
4246    }
4247    for (auto event : pCB->events) {
4248        auto eventNode = my_data->eventMap.find(event);
4249        if (eventNode != my_data->eventMap.end()) {
4250            eventNode->second.in_use.fetch_sub(1);
4251        }
4252    }
4253    for (auto event : pCB->writeEventsBeforeWait) {
4254        auto eventNode = my_data->eventMap.find(event);
4255        if (eventNode != my_data->eventMap.end()) {
4256            eventNode->second.write_in_use--;
4257        }
4258    }
4259    for (auto queryStatePair : pCB->queryToStateMap) {
4260        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4261    }
4262    for (auto eventStagePair : pCB->eventToStageMap) {
4263        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4264    }
4265}
4266// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
4267//  decrementResources for all priorFences and cmdBuffers associated with fence.
4268static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4269    bool skip_call = false;
4270    std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs;
4271    for (uint32_t i = 0; i < fenceCount; ++i) {
4272        auto pFence = getFenceNode(my_data, pFences[i]);
4273        if (!pFence || pFence->state != FENCE_INFLIGHT)
4274            continue;
4275
4276        fence_pairs.emplace_back(pFences[i], pFence);
4277        pFence->state = FENCE_RETIRED;
4278
4279        decrementResources(my_data, static_cast<uint32_t>(pFence->priorFences.size()),
4280                           pFence->priorFences.data());
4281        for (auto & submission : pFence->submissions) {
4282            decrementResources(my_data, &submission);
4283            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4284            removeInFlightCmdBuffer(my_data, submission.cb);
4285        }
4286        pFence->submissions.clear();
4287        pFence->priorFences.clear();
4288    }
4289    for (auto fence_pair : fence_pairs) {
4290        for (auto queue : fence_pair.second->queues) {
4291            auto pQueue = getQueueNode(my_data, queue);
4292            if (pQueue) {
4293                auto last_fence_data =
4294                    std::find(pQueue->lastFences.begin(), pQueue->lastFences.end(), fence_pair.first);
4295                if (last_fence_data != pQueue->lastFences.end())
4296                    pQueue->lastFences.erase(last_fence_data);
4297            }
4298        }
4299        for (auto& fence_data : my_data->fenceMap) {
4300          auto prior_fence_data =
4301              std::find(fence_data.second.priorFences.begin(), fence_data.second.priorFences.end(), fence_pair.first);
4302          if (prior_fence_data != fence_data.second.priorFences.end())
4303              fence_data.second.priorFences.erase(prior_fence_data);
4304        }
4305    }
4306    return skip_call;
4307}
4308// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4309static bool decrementResources(layer_data *my_data, VkQueue queue) {
4310    bool skip_call = false;
4311    auto queue_data = my_data->queueMap.find(queue);
4312    if (queue_data != my_data->queueMap.end()) {
4313        for (auto & submission : queue_data->second.untrackedSubmissions) {
4314            decrementResources(my_data, &submission);
4315            skip_call |= cleanInFlightCmdBuffer(my_data, submission.cb);
4316            removeInFlightCmdBuffer(my_data, submission.cb);
4317        }
4318        queue_data->second.untrackedSubmissions.clear();
4319        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4320                                        queue_data->second.lastFences.data());
4321    }
4322    return skip_call;
4323}

// This function merges command buffer tracking between queues when there is a semaphore dependency
// between them (see below for details as to how tracking works). When this happens, the prior
// fences from the signaling queue are merged into the wait queue, as are any untracked command
// buffers.
static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
    if (queue == other_queue) {
        return;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    auto other_queue_data = dev_data->queueMap.find(other_queue);
    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
        return;
    }
    for (auto fenceInner : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(fenceInner);
        auto fence_node = dev_data->fenceMap.find(fenceInner);
        if (fence_node != dev_data->fenceMap.end()) {
            fence_node->second.queues.insert(other_queue_data->first);
        }
    }
    // TODO: Stealing the untracked CBs out of the signaling queue isn't really
    // correct. A subsequent submission + wait, or a QWI on that queue, or
    // another semaphore dependency to a third queue may /all/ provide
    // suitable proof that the work we're stealing here has completed on the
    // device, but we've lost that information by moving the tracking between
    // queues.
    if (fence != VK_NULL_HANDLE) {
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data == dev_data->fenceMap.end()) {
            return;
        }
        for (auto cmdbuffer : other_queue_data->second.untrackedSubmissions) {
            fence_data->second.submissions.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedSubmissions.clear();
    } else {
        for (auto cmdbuffer : other_queue_data->second.untrackedSubmissions) {
            queue_data->second.untrackedSubmissions.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedSubmissions.clear();
    }
    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
    }
    for (auto queryStatePair : other_queue_data->second.queryToStateMap) {
        queue_data->second.queryToStateMap[queryStatePair.first] = queryStatePair.second;
    }
}

// This is the core function for tracking command buffers. There are two primary ways command
// buffers are tracked. When submitted, they are stored in the command buffer list associated
// with a fence, or in the untracked command buffer list associated with a queue if no fence is used.
// Each queue also stores the last fence that was submitted onto the queue. This allows us to
// create a linked list of fences and their associated command buffers, so that if one fence is
// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
// waited on (either via a queue, device, or fence wait), we free the cmd buffers for that fence and
// recursively call with the prior fences.
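//
// Illustrative (hypothetical) app-side sequence showing how that tracking plays out; 'queue',
// the two VkSubmitInfo structs, and both fences are assumed to be valid handles:
//
//     vkQueueSubmit(queue, 1, &submitWithCb0, fenceA);  // cb0 is stored under fenceA
//     vkQueueSubmit(queue, 1, &submitWithCb1, fenceB);  // cb1 under fenceB; fenceA becomes a prior fence of fenceB
//     vkWaitForFences(device, 1, &fenceB, VK_TRUE, UINT64_MAX);
//     // Waiting on fenceB retires it, frees cb1, and recursively retires fenceA, freeing cb0.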

// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void
SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence)
{
    assert(!pFence->priorFences.size());
    assert(!pFence->submissions.size());

    std::swap(pFence->priorFences, pQueue->lastFences);
    std::swap(pFence->submissions, pQueue->untrackedSubmissions);

    pFence->queues.insert(pQueue->queue);
    pFence->state = FENCE_INFLIGHT;

    pQueue->lastFences.push_back(pFence->fence);
}

static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                       VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to the global in-flight set and increment their in_use counts
                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
                    pSubCB->in_use.fetch_add(1);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                pCB->in_use.fetch_add(1);
            }
        }
    }
}

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skipCall = false;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    // Validate that cmd buffers have been fully recorded
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            //  flagged if we hit this INVALID cmd buffer case and none of the
            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skipCall;
}
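
// Illustrative (hypothetical) sequence that trips the ONE_TIME_SUBMIT check above; 'cb' was
// begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and is referenced by 'submitWithCb':
//
//     vkQueueSubmit(queue, 1, &submitWithCb, VK_NULL_HANDLE);   // first submission: OK
//     vkQueueSubmit(queue, 1, &submitWithCb, VK_NULL_HANDLE);   // second submission: submitCount > 1 -> error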

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, std::vector<VkSemaphore> const &semaphores) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skipCall = validateAndIncrementResources(dev_data, pCB, semaphores);
    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer], semaphores);
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
                            " but that buffer has subsequently been bound to "
                            "primary cmd buffer 0x%" PRIxLEAST64
                            " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                            reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                            reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }
    skipCall |= validateCommandBufferState(dev_data, pCB);
    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
    // on device
    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
    return skipCall;
}

static bool
ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
{
    bool skipCall = false;

    if (pFence) {
        if (pFence->state == FENCE_INFLIGHT) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
                                reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skipCall;
}
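
// Illustrative (hypothetical) sketch of the fence lifecycle this check enforces; 'device',
// 'queue', 'submit', and 'fence' are assumed valid, and the fence was created unsignaled:
//
//     vkQueueSubmit(queue, 1, &submit, fence);                    // FENCE_UNSIGNALED -> FENCE_INFLIGHT
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);    // FENCE_INFLIGHT  -> FENCE_RETIRED
//     vkResetFences(device, 1, &fence);                           // required before the fence may be submitted again
//     vkQueueSubmit(queue, 1, &submit, fence);                    // OK: re-submission after reset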

VKAPI_ATTR VkResult VKAPI_CALL
QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueNode(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skipCall |= ValidateFenceForSubmit(dev_data, pFence);

    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // TODO : Review these old print functions and clean up as appropriate
    print_mem_list(dev_data);
    printCBList(dev_data);

    // Mark the fence in-use.
    if (pFence) {
        SubmitFence(pQueue, pFence);
    }

    // If a fence is supplied, all the command buffers for this call will be
    // delimited by that fence. Otherwise, they go in the untracked portion of
    // the queue, and may end up being delimited by a fence supplied in a
    // subsequent submission.
    auto & submitTarget = pFence ? pFence->submissions : pQueue->untrackedSubmissions;

    // Now verify each individual submit
    std::unordered_set<VkQueue> processed_other_queues;
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<VkSemaphore> semaphoreList;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
            semaphoreList.push_back(semaphore);
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
                    processed_other_queues.insert(other_queue);
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                semaphoreList.push_back(semaphore);
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
                } else {
                    dev_data->semaphoreMap[semaphore].signaled = true;
                    dev_data->semaphoreMap[semaphore].queue = queue;
                }
            }
        }

        // TODO: just add one submission per VkSubmitInfo!
        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (pCBNode) {
                skipCall |= ValidateCmdBufImageLayouts(dev_data, pCBNode);

                submitTarget.emplace_back(pCBNode->commandBuffer, semaphoreList);
                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
                    submitTarget.emplace_back(secondaryCmdBuffer, semaphoreList);
                }

                pCBNode->submitCount++; // increment submit count
                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode, semaphoreList);
                // Call submit-time functions to validate/update state
                for (auto &function : pCBNode->validate_functions) {
                    skipCall |= function();
                }
                for (auto &function : pCBNode->eventUpdates) {
                    skipCall |= function(queue);
                }
                for (auto &function : pCBNode->queryUpdates) {
                    skipCall |= function(queue);
                }
            }
        }
    }
    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
    lock.unlock();
    if (!skipCall)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}
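
// Illustrative (hypothetical) sketch of the semaphore pairing the checks above enforce;
// 'queueA'/'queueB' and both VkSubmitInfo structs are assumed valid:
//
//     // submitA signals 'sem' on queueA; submitB waits on 'sem' on queueB.
//     vkQueueSubmit(queueA, 1, &submitA, VK_NULL_HANDLE);   // marks sem signaled, records queueA as signaler
//     vkQueueSubmit(queueB, 1, &submitB, VK_NULL_HANDLE);   // consumes the signal; also merges queueA's tracked
//                                                           // work into queueB (see updateTrackedCommandBuffers)
//     // Signaling 'sem' again without an intervening wait, or waiting on an unsignaled 'sem',
//     // would trigger the DRAWSTATE_QUEUE_FORWARD_PROGRESS errors above.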

VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    std::lock_guard<std::mutex> lock(global_lock);
    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
    print_mem_list(my_data);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device (for example, by command buffers queued for execution). The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.

    std::unique_lock<std::mutex> lock(global_lock);
    freeMemObjInfo(my_data, device, mem, false);
    print_mem_list(my_data);
    printCBList(my_data);
    lock.unlock();
    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
}

static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skipCall = false;

    if (size == 0) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_info->memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                           " with size of VK_WHOLE_SIZE oversteps total allocation size 0x%" PRIx64,
                                    offset, mem_info->allocInfo.allocationSize, mem_info->allocInfo.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->allocInfo.allocationSize) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total allocation size 0x%" PRIx64, offset,
                            size + offset, mem_info->allocInfo.allocationSize);
            }
        }
    }
    return skipCall;
}
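
// Illustrative (hypothetical) mappings against the rules above, for a 'mem' allocation of
// 1024 bytes that is not currently mapped:
//
//     void *pData;
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &pData);   // OK: maps bytes [0, 1024)
//     vkUnmapMemory(device, mem);
//     vkMapMemory(device, mem, 256, 512, 0, &pData);           // OK: maps bytes [256, 768)
//     vkMapMemory(device, mem, 768, 512, 0, &pData);           // Error: already mapped, and 768 + 512 > 1024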

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->memRange.offset = offset;
        mem_info->memRange.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skipCall = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->memRange.size) {
            // Valid Usage: memory must currently be mapped
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                               "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
        mem_info->memRange.size = 0;
        if (mem_info->pData) {
            free(mem_info->pData);
            mem_info->pData = nullptr;
        }
    }
    return skipCall;
}

static char NoncoherentMemoryFillValue = 0xb;

static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->pDriverData = *ppData;
        uint32_t index = mem_info->allocInfo.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            // Coherent memory needs no shadow copy
            mem_info->pData = nullptr;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->allocInfo.allocationSize;
            }
            // For non-coherent memory, hand the app a pointer into the middle of a double-sized
            // shadow allocation pre-filled with a known value, so that writes straying outside
            // the mapped range can later be detected.
            size_t convSize = (size_t)(size);
            mem_info->pData = malloc(2 * convSize);
            memset(mem_info->pData, NoncoherentMemoryFillValue, 2 * convSize);
            *ppData = static_cast<char *>(mem_info->pData) + (convSize / 2);
        }
    }
}
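
// Shadow-buffer layout produced above (sizes illustrative):
//
//     [ guard: convSize/2 ][ app-visible data: convSize ][ guard: convSize/2 ]
//     ^ pData              ^ *ppData returned to the app
//
// Any guard byte that no longer equals NoncoherentMemoryFillValue indicates the app
// wrote outside the range it mapped.
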
// Verify that state for fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled and
//  it should have been submitted on a queue or during acquire next image
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skipCall = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                "acquire next image.",
                                apiCall, reinterpret_cast<uint64_t &>(fence));
        }
    }
    return skipCall;
}

VKAPI_ATTR VkResult VKAPI_CALL
WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        // When we know that all fences are complete we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            skip_call |= decrementResources(dev_data, fenceCount, pFences);
        }
        // NOTE : The alternate case, where only some fences have completed, is not handled
        //  here. In that case, for the app to determine which fences completed, it will have
        //  to call vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
        lock.unlock();
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
    lock.unlock();

    if (skipCall)
        return result;

    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    lock.lock();
    if (result == VK_SUCCESS) {
        skipCall |= decrementResources(dev_data, 1, &fence);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
                                          VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->queue = *pQueue;
        pQNode->device = device;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= decrementResources(dev_data, queue);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto queue : dev_data->queues) {
        skip_call |= decrementResources(dev_data, queue);
    }
    dev_data->globalInFlightCmdBuffers.clear();
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto fence_pair = dev_data->fenceMap.find(fence);
    if (fence_pair != dev_data->fenceMap.end()) {
        if (fence_pair->second.state == FENCE_INFLIGHT) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                "Fence 0x%" PRIx64 " is in use.", (uint64_t)(fence));
        }
        dev_data->fenceMap.erase(fence_pair);
    }
    lock.unlock();

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    auto item = dev_data->semaphoreMap.find(semaphore);
    if (item != dev_data->semaphoreMap.end()) {
        if (item->second.in_use.load()) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
        }
        dev_data->semaphoreMap.erase(semaphore);
    }
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_data = dev_data->eventMap.find(event);
    if (event_data != dev_data->eventMap.end()) {
        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
        }
        dev_data->eventMap.erase(event_data);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                   VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        auto pCB = getCBNode(dev_data, cmdBuffer);
        for (auto queryStatePair : pCB->queryToStateMap) {
            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
        }
    }
    bool skip_call = false;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        // Available and in flight
        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
            queryToStateElement->second) {
            for (auto cmdBuffer : queryElement->second) {
                auto pCB = getCBNode(dev_data, cmdBuffer);
                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                         (uint64_t)(queryPool), firstQuery + i);
                } else {
                    for (auto event : queryEventElement->second) {
                        dev_data->eventMap[event].needsSignaled = true;
                    }
                }
            }
        // Unavailable and in flight
        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
                   !queryToStateElement->second) {
            // TODO : Can there be the same query in use by multiple command buffers in flight?
            bool make_available = false;
            for (auto cmdBuffer : queryElement->second) {
                auto pCB = getCBNode(dev_data, cmdBuffer);
                make_available |= pCB->queryToStateMap[query];
            }
            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
        // Unavailable
        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                 (uint64_t)(queryPool), firstQuery + i);
        // Uninitialized
        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
                                                                flags);
}
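
// Illustrative (hypothetical) call that sidesteps the in-flight/unavailable errors above by
// asking the driver to wait for availability; 'pool' holds 4 queries and 'results' is large enough:
//
//     uint64_t results[4];
//     vkGetQueryPoolResults(device, pool, 0, 4, sizeof(results), results, sizeof(uint64_t),
//                           VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);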

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_node = getBufferNode(my_data, buffer);
    if (!buffer_node) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_node->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
                                     VkDebugReportObjectTypeEXT object_type) {
    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle,
                       other_handle);
    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle,
                       other_handle);
    }
}

static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
                                  VkDebugReportObjectTypeEXT object_type) {
    bool skip_call = false;
    // Ranges are compared at bufferImageGranularity resolution; addresses that round down to
    // the same granularity page are considered aliased.
    const VkDeviceSize granularity_mask = ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1);

    for (auto range : ranges) {
        if ((range.end & granularity_mask) < (new_range.start & granularity_mask))
            continue;
        if ((range.start & granularity_mask) > (new_range.end & granularity_mask))
            continue;
        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
    }
    return skip_call;
}
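
// Worked example (illustrative): with bufferImageGranularity = 0x400, the mask is ~0x3FF.
// An existing range [0x000, 0x4FF] and a new range starting at 0x500 both round to page 0x400
// (0x4FF & ~0x3FF == 0x400 and 0x500 & ~0x3FF == 0x400), so they alias and the error fires;
// a new range starting at 0x800 rounds to a later page and passes both checks above.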

static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
    MEMORY_RANGE range;
    range.handle = handle;
    range.memory = mem;
    range.start = memoryOffset;
    range.end = memoryOffset + memRequirements.size - 1;
    ranges.push_back(range);
    return range;
}

static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
    for (size_t item = 0; item < ranges.size(); item++) {
        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
            ranges.erase(ranges.begin() + item);
            break;
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
        lock.unlock();
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
    }
    // Clean up memory binding and range information for buffer
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it != dev_data->bufferMap.end()) {
        auto mem_info = getMemObjInfo(dev_data, buff_it->second.get()->mem);
        if (mem_info) {
            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), buff_it->second.get()->mem, mem_info->bufferRanges);
        }
        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
        dev_data->bufferMap.erase(buff_it);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    auto item = dev_data->bufferViewMap.find(bufferView);
    if (item != dev_data->bufferViewMap.end()) {
        dev_data->bufferViewMap.erase(item);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    const auto &imageEntry = dev_data->imageMap.find(image);
    if (imageEntry != dev_data->imageMap.end()) {
        // Clean up memory mapping, bindings and range references for image
        auto mem_info = getMemObjInfo(dev_data, imageEntry->second.get()->mem);
        if (mem_info) {
            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.get()->mem, mem_info->imageRanges);
            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
            mem_info->image = VK_NULL_HANDLE;
        }
        // Remove image from imageMap
        dev_data->imageMap.erase(imageEntry);
    }
    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
    if (subEntry != dev_data->imageSubresourceMap.end()) {
        for (const auto& pair : subEntry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(subEntry);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName) {
    bool skip_call = false;
    if (((1 << mem_info->allocInfo.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
                            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                            "type (0x%X) of this memory object 0x%" PRIx64 ".",
                            funcName, memory_type_bits, mem_info->allocInfo.memoryTypeIndex,
                            reinterpret_cast<const uint64_t &>(mem_info->mem));
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = (uint64_t)(buffer);
    bool skipCall =
        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        buffer_node->mem = mem;
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            const MEMORY_RANGE range =
                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, mem_info->bufferRanges);
            skipCall |= validate_memory_range(dev_data, mem_info->imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
                        memoryOffset, memRequirements.alignment);
        }
        // Validate device limits alignments
        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
            }
        }
        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
                0) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();
    if (!skipCall) {
        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}
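
// Illustrative (hypothetical) app-side pattern for choosing a memoryOffset that satisfies
// the alignment checks above; 'desiredOffset' is an assumed starting point:
//
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);
//     VkDeviceSize offset = (desiredOffset + reqs.alignment - 1) & ~(reqs.alignment - 1);  // round up to alignment
//     vkBindBufferMemory(device, buffer, mem, offset);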

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}
5324// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5325//  A primary command buffer that is in-flight is always an error; a secondary command buffer
5326//  is only an error if its primary is also in-flight
5327// This function is only valid at a point when cmdBuffer is being reset or freed
5328static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5329    bool skip_call = false;
5330    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5331        // Primary CB or secondary where primary is also in-flight is an error
5332        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5333            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5334            skip_call |= log_msg(
5335                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5336                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5337                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5338                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5339        }
5340    }
5341    return skip_call;
5342}
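
// Hedged illustration (application-side, not part of this layer): the check above
// enforces that a command buffer is not reset or freed while a prior submission may
// still be executing. A typical correct sequence, with queue/fence/cmd_buf as
// placeholder handles, is:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // retire the submission
//     vkResetCommandBuffer(cmd_buf, 0);                        // now safe: no longer in-flight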
5343
5344// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5345static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
5346    bool skip_call = false;
5347    for (auto cmd_buffer : pPool->commandBuffers) {
5348        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5349            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5350        }
5351    }
5352    return skip_call;
5353}
5354
5355static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5356    for (auto cmd_buffer : pPool->commandBuffers) {
5357        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5358    }
5359}
5360
5361VKAPI_ATTR void VKAPI_CALL
5362FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5363    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5364
5365    bool skip_call = false;
5366    std::unique_lock<std::mutex> lock(global_lock);
5367    for (uint32_t i = 0; i < commandBufferCount; i++) {
5368        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
5369        // Delete CB information structure, and remove from commandBufferMap
5370        if (cb_pair != dev_data->commandBufferMap.end()) {
5371            skip_call |= checkCommandBufferInFlight(dev_data, cb_pair->second, "free");
5372            dev_data->globalInFlightCmdBuffers.erase(cb_pair->first);
5373            // reset prior to delete for data clean-up
5374            resetCB(dev_data, (*cb_pair).second->commandBuffer);
5375            delete (*cb_pair).second;
5376            dev_data->commandBufferMap.erase(cb_pair);
5377        }
5378
5379        // Remove commandBuffer reference from commandPoolMap
5380        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
5381    }
5382    printCBList(dev_data);
5383    lock.unlock();
5384
5385    if (!skip_call)
5386        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5387}
5388
5389VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5390                                                 const VkAllocationCallbacks *pAllocator,
5391                                                 VkCommandPool *pCommandPool) {
5392    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5393
5394    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5395
5396    if (VK_SUCCESS == result) {
5397        std::lock_guard<std::mutex> lock(global_lock);
5398        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5399        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5400    }
5401    return result;
5402}
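
// Hedged illustration (application-side): the createFlags recorded above are consulted
// later, e.g. when vkBeginCommandBuffer() implicitly resets a command buffer. A pool
// permitting per-buffer reset would be created roughly like this (device and
// queue_family_index are placeholder handles/values):
//
//     VkCommandPoolCreateInfo pool_ci = {};
//     pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     pool_ci.queueFamilyIndex = queue_family_index;
//     VkCommandPool pool;
//     vkCreateCommandPool(device, &pool_ci, nullptr, &pool);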
5403
5404VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5405                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5406
5407    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5408    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5409    if (result == VK_SUCCESS) {
5410        std::lock_guard<std::mutex> lock(global_lock);
5411        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5412    }
5413    return result;
5414}
5415
5416// Destroy commandPool along with all of the commandBuffers allocated from that pool
5417VKAPI_ATTR void VKAPI_CALL
5418DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5419    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5420    bool skipCall = false;
5421    std::unique_lock<std::mutex> lock(global_lock);
5422    // Verify that command buffers in pool are complete (not in-flight)
5423    auto pPool = getCommandPoolNode(dev_data, commandPool);
5424    skipCall |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
5425
5426    lock.unlock();
5427
5428    if (skipCall)
5429        return;
5430
5431    dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5432
5433    lock.lock();
5434    // Remove the pool from commandPoolMap only after removing all of its command buffers from commandBufferMap
5435    clearCommandBuffersInFlight(dev_data, pPool);
5436    for (auto cb : pPool->commandBuffers) {
5437        clear_cmd_buf_and_mem_references(dev_data, cb);
5438        auto del_cb = dev_data->commandBufferMap.find(cb);
5439        delete del_cb->second;                  // delete CB info structure
5440        dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
5441    }
5442    dev_data->commandPoolMap.erase(commandPool);
5443    lock.unlock();
5444}
5445
5446VKAPI_ATTR VkResult VKAPI_CALL
5447ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5448    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5449    bool skipCall = false;
5450
5451    auto pPool = getCommandPoolNode(dev_data, commandPool);
5452    skipCall |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
5453
5454    if (skipCall)
5455        return VK_ERROR_VALIDATION_FAILED_EXT;
5456
5457    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
5458
5459    // Reset all of the CBs allocated from this pool
5460    if (VK_SUCCESS == result) {
5461        std::lock_guard<std::mutex> lock(global_lock);
5462        clearCommandBuffersInFlight(dev_data, pPool);
5463        for (auto cmdBuffer : pPool->commandBuffers) {
5464            resetCB(dev_data, cmdBuffer);
5465        }
5466    }
5467    return result;
5468}
5469
5470VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5471    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5472    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5473    bool skipCall = false;
5474    std::unique_lock<std::mutex> lock(global_lock);
5475    for (uint32_t i = 0; i < fenceCount; ++i) {
5476        auto pFence = getFenceNode(dev_data, pFences[i]);
5477        if (pFence) {
5478            if (pFence->state == FENCE_INFLIGHT) {
5479                skipCall |=
5480                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5481                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5482                            "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
5483            }
5484            pFence->state = FENCE_UNSIGNALED;
5485
5486            // TODO: these should really have already been enforced on
5487            // INFLIGHT->RETIRED transition.
5488            pFence->queues.clear();
5489            pFence->priorFences.clear();
5490        }
5491    }
5492    lock.unlock();
5493    if (!skipCall)
5494        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
5495    return result;
5496}
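
// Hedged illustration (application-side): the FENCE_INFLIGHT check above corresponds to
// the rule that a fence must be retired before it is reset:
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // fence leaves the in-flight state
//     vkResetFences(device, 1, &fence);                        // back to unsignaled, safe to reuse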
5497
5498VKAPI_ATTR void VKAPI_CALL
5499DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5500    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5501    std::unique_lock<std::mutex> lock(global_lock);
5502    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
5503    if (fbNode != dev_data->frameBufferMap.end()) {
5504        for (auto cb : fbNode->second->referencingCmdBuffers) {
5505            auto cbNode = dev_data->commandBufferMap.find(cb);
5506            if (cbNode != dev_data->commandBufferMap.end()) {
5507                // Set CB as invalid and record destroyed framebuffer
5508                cbNode->second->state = CB_INVALID;
5509                cbNode->second->destroyedFramebuffers.insert(framebuffer);
5510            }
5511        }
5512        dev_data->frameBufferMap.erase(fbNode);
5513    }
5514    lock.unlock();
5515    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
5516}
5517
5518VKAPI_ATTR void VKAPI_CALL
5519DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
5520    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5521    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
5522    std::lock_guard<std::mutex> lock(global_lock);
5523    dev_data->renderPassMap.erase(renderPass);
5524}
5525
5526VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5527                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
5528    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5529
5530    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
5531
5532    if (VK_SUCCESS == result) {
5533        std::lock_guard<std::mutex> lock(global_lock);
5534        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
5535        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(pCreateInfo))));
5536    }
5537    return result;
5538}
5539
5540VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
5541                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
5542    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5543    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
5544    if (VK_SUCCESS == result) {
5545        std::lock_guard<std::mutex> lock(global_lock);
5546        dev_data->bufferViewMap[*pView] = unique_ptr<VkBufferViewCreateInfo>(new VkBufferViewCreateInfo(*pCreateInfo));
5547        // In order to create a valid buffer view, the buffer must have been created with at least one of the
5548        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
5549        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
5550                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
5551                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
5552    }
5553    return result;
5554}
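
// Hedged illustration (application-side): to satisfy the usage check above, the source
// buffer must have been created with a texel-buffer usage bit, e.g. (size is a
// placeholder value):
//
//     VkBufferCreateInfo buf_ci = {};
//     buf_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_ci.size = 4096;
//     buf_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // required for a buffer view
//     buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     VkBuffer buf;
//     vkCreateBuffer(device, &buf_ci, nullptr, &buf);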
5555
5556VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
5557                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
5558    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5559
5560    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
5561
5562    if (VK_SUCCESS == result) {
5563        std::lock_guard<std::mutex> lock(global_lock);
5564        IMAGE_LAYOUT_NODE image_node;
5565        image_node.layout = pCreateInfo->initialLayout;
5566        image_node.format = pCreateInfo->format;
5567        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pCreateInfo))));
5568        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
5569        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
5570        dev_data->imageLayoutMap[subpair] = image_node;
5571    }
5572    return result;
5573}
5574
5575static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
5576    /* expects global_lock to be held by caller */
5577
5578    auto image_node = getImageNode(dev_data, image);
5579    if (image_node) {
5580        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
5581         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
5582         * the actual values.
5583         */
5584        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
5585            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
5586        }
5587
5588        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
5589            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
5590        }
5591    }
5592}
5593
5594// Return the correct layer/level counts if the caller used the special
5595// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
5596static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
5597                                         VkImage image) {
5598    /* expects global_lock to be held by caller */
5599
5600    *levels = range.levelCount;
5601    *layers = range.layerCount;
5602    auto image_node = getImageNode(dev_data, image);
5603    if (image_node) {
5604        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
5605            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
5606        }
5607        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
5608            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
5609        }
5610    }
5611}
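
// Hedged worked example: for an image created with mipLevels = 10, a range with
// baseMipLevel = 3 and levelCount = VK_REMAINING_MIP_LEVELS resolves to
// levelCount = 10 - 3 = 7; layerCount resolves analogously from arrayLayers.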
5612
5613VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
5614                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
5615    bool skipCall = false;
5616    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5617    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5618    {
5619        // Validate that img has correct usage flags set
5620        std::lock_guard<std::mutex> lock(global_lock);
5621        skipCall |= validate_image_usage_flags(dev_data, pCreateInfo->image,
5622                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
5623                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5624                false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
5625    }
5626
5627    if (!skipCall) {
5628        result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
5629    }
5630
5631    if (VK_SUCCESS == result) {
5632        std::lock_guard<std::mutex> lock(global_lock);
5633        dev_data->imageViewMap[*pView] = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo));
5634        ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[*pView].get()->subresourceRange, pCreateInfo->image);
5635    }
5636
5637    return result;
5638}
5639
5640VKAPI_ATTR VkResult VKAPI_CALL
5641CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
5642    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5643    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
5644    if (VK_SUCCESS == result) {
5645        std::lock_guard<std::mutex> lock(global_lock);
5646        auto &fence_node = dev_data->fenceMap[*pFence];
5647        fence_node.fence = *pFence;
5648        fence_node.createInfo = *pCreateInfo;
5649        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
5650    }
5651    return result;
5652}
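
// Hedged illustration (application-side): a fence created pre-signaled, which the code
// above records as FENCE_RETIRED rather than FENCE_UNSIGNALED:
//
//     VkFenceCreateInfo fence_ci = {};
//     fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
//     fence_ci.flags = VK_FENCE_CREATE_SIGNALED_BIT; // the first wait on it completes immediately
//     VkFence fence;
//     vkCreateFence(device, &fence_ci, nullptr, &fence);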
5653
5654// TODO handle pipeline caches
5655VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
5656                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
5657    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5658    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
5659    return result;
5660}
5661
5662VKAPI_ATTR void VKAPI_CALL
5663DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
5664    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5665    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
5666}
5667
5668VKAPI_ATTR VkResult VKAPI_CALL
5669GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
5670    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5671    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
5672    return result;
5673}
5674
5675VKAPI_ATTR VkResult VKAPI_CALL
5676MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
5677    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5678    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
5679    return result;
5680}
5681
5682// utility function to set collective state for pipeline
5683void set_pipeline_state(PIPELINE_NODE *pPipe) {
5684    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
5685    if (pPipe->graphicsPipelineCI.pColorBlendState) {
5686        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
5687            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
5688                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5689                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5690                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5691                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5692                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5693                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5694                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5695                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
5696                    pPipe->blendConstantsEnabled = true;
5697                }
5698            }
5699        }
5700    }
5701}
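
// Hedged note: blendConstantsEnabled set above is presumably consulted elsewhere to
// require that blend constants actually be provided. An attachment state that triggers
// it looks roughly like:
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
//
// The constants then come either statically from VkPipelineColorBlendStateCreateInfo's
// blendConstants or dynamically via vkCmdSetBlendConstants().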
5702
5703VKAPI_ATTR VkResult VKAPI_CALL
5704CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5705                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5706                        VkPipeline *pPipelines) {
5707    VkResult result = VK_SUCCESS;
5708    // TODO What to do with pipelineCache?
5709    // The order of operations here is a little convoluted but gets the job done
5710    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
5711    //  2. Create state is then validated (which uses flags setup during shadowing)
5712    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
5713    bool skipCall = false;
5714    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5715    vector<PIPELINE_NODE *> pPipeNode(count);
5716    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5717
5718    uint32_t i = 0;
5719    std::unique_lock<std::mutex> lock(global_lock);
5720
5721    for (i = 0; i < count; i++) {
5722        pPipeNode[i] = new PIPELINE_NODE;
5723        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
5724        pPipeNode[i]->renderPass = getRenderPass(dev_data, pCreateInfos[i].renderPass);
5725        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5726
5727        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
5728    }
5729
5730    if (!skipCall) {
5731        lock.unlock();
5732        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5733                                                                          pPipelines);
5734        lock.lock();
5735        for (i = 0; i < count; i++) {
5736            pPipeNode[i]->pipeline = pPipelines[i];
5737            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5738        }
5739        lock.unlock();
5740    } else {
5741        for (i = 0; i < count; i++) {
5742            delete pPipeNode[i];
5743        }
5744        lock.unlock();
5745        return VK_ERROR_VALIDATION_FAILED_EXT;
5746    }
5747    return result;
5748}
5749
5750VKAPI_ATTR VkResult VKAPI_CALL
5751CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5752                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5753                       VkPipeline *pPipelines) {
5754    VkResult result = VK_SUCCESS;
5755    bool skipCall = false;
5756
5757    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5758    vector<PIPELINE_NODE *> pPipeNode(count);
5759    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5760
5761    uint32_t i = 0;
5762    std::unique_lock<std::mutex> lock(global_lock);
5763    for (i = 0; i < count; i++) {
5764        // TODO: Verify compute stage bits
5765
5766        // Create and initialize internal tracking data structure
5767        pPipeNode[i] = new PIPELINE_NODE;
5768        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
5769        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5770        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
5771
5772        // TODO: Add Compute Pipeline Verification
5773        skipCall |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i],
5774                                               &dev_data->phys_dev_properties.features,
5775                                               dev_data->shaderModuleMap);
5776        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
5777    }
5778
5779    if (!skipCall) {
5780        lock.unlock();
5781        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5782                                                                         pPipelines);
5783        lock.lock();
5784        for (i = 0; i < count; i++) {
5785            pPipeNode[i]->pipeline = pPipelines[i];
5786            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5787        }
5788        lock.unlock();
5789    } else {
5790        for (i = 0; i < count; i++) {
5791            // Clean up any locally allocated data structures
5792            delete pPipeNode[i];
5793        }
5794        lock.unlock();
5795        return VK_ERROR_VALIDATION_FAILED_EXT;
5796    }
5797    return result;
5798}
5799
5800VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
5801                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
5802    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5803    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
5804    if (VK_SUCCESS == result) {
5805        std::lock_guard<std::mutex> lock(global_lock);
5806        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
5807    }
5808    return result;
5809}
5810
5811VKAPI_ATTR VkResult VKAPI_CALL
5812CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5813                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
5814    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5815    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5816    if (VK_SUCCESS == result) {
5817        // TODOSC : Capture layout bindings set
5818        std::lock_guard<std::mutex> lock(global_lock);
5819        dev_data->descriptorSetLayoutMap[*pSetLayout] =
5820            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
5821    }
5822    return result;
5823}
5824
5825// Used by CreatePipelineLayout and CmdPushConstants.
5826// Note that the index argument is optional and only used by CreatePipelineLayout.
5827static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5828                                      const char *caller_name, uint32_t index = 0) {
5829    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5830    bool skipCall = false;
5831    // Check that offset + size don't exceed the max.
5832    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
5833    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5834        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5835        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5836            skipCall |=
5837                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5838                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
5839                                                              "exceeds this device's maxPushConstantsSize of %u.",
5840                        caller_name, index, offset, size, maxPushConstantsSize);
5841        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5842            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5843                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
5844                                                                      "exceeds this device's maxPushConstantsSize of %u.",
5845                                caller_name, offset, size, maxPushConstantsSize);
5846        } else {
5847            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5848                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5849        }
5850    }
5851    // size needs to be non-zero and a multiple of 4.
5852    if ((size == 0) || ((size & 0x3) != 0)) {
5853        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5854            skipCall |=
5855                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5856                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5857                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5858                        caller_name, index, size);
5859        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5860            skipCall |=
5861                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5862                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5863                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5864                        caller_name, size);
5865        } else {
5866            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5867                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5868        }
5869    }
5870    // offset needs to be a multiple of 4.
5871    if ((offset & 0x3) != 0) {
5872        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5873            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5874                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5875                                                                      "offset %u. Offset must be a multiple of 4.",
5876                                caller_name, index, offset);
5877        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5878            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5879                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5880                                                                      "offset %u. Offset must be a multiple of 4.",
5881                                caller_name, offset);
5882        } else {
5883            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5884                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5885        }
5886    }
5887    return skipCall;
5888}
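
// Hedged worked example: with maxPushConstantsSize = 128, a range {offset = 16, size = 64}
// passes every check above (16 < 128, 64 <= 128 - 16, both values are multiples of 4,
// size != 0), while {offset = 120, size = 16} is flagged because 16 > 128 - 120.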
5889
5890VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5891                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5892    bool skipCall = false;
5893    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5894    // Push Constant Range checks
5895    uint32_t i = 0;
5896    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5897        skipCall |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5898                                              pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5899        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5900            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5901                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
5902        }
5903    }
5904    // Each range has been individually validated.  Now check for overlap between ranges, which is only meaningful once each range is valid.
5905    if (!skipCall) {
5906        uint32_t i, j;
5907        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5908            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5909                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
5910                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
5911                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
5912                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
5913                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
5914                    skipCall |=
5915                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5916                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
5917                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
5918                                i, minA, maxA, j, minB, maxB);
5919                }
5920            }
5921        }
5922    }
5923
5924    if (skipCall)
5925        return VK_ERROR_VALIDATION_FAILED_EXT;
5926
5927    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5928    if (VK_SUCCESS == result) {
5929        std::lock_guard<std::mutex> lock(global_lock);
5930        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5931        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
5932        plNode.setLayouts.resize(pCreateInfo->setLayoutCount);
5933        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5934            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
5935            plNode.setLayouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5936        }
5937        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
5938        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5939            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
5940        }
5941    }
5942    return result;
5943}
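
// Hedged worked example for the overlap test above: ranges 0:[0, 16) and 1:[8, 24)
// overlap because minA (0) <= minB (8) and maxA (16) > minB (8). Adjacent ranges such
// as 0:[0, 16) and 1:[16, 32) trip neither clause because the intervals are half-open.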
5944
5945VKAPI_ATTR VkResult VKAPI_CALL
5946CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
5947                     VkDescriptorPool *pDescriptorPool) {
5948    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5949    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
5950    if (VK_SUCCESS == result) {
5951        // Record this pool in the global descriptorPoolMap
5952        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5953                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
5954                    (uint64_t)*pDescriptorPool))
5955            return VK_ERROR_VALIDATION_FAILED_EXT;
5956        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
5957        if (NULL == pNewNode) {
5958            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5959                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
5960                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
5961                return VK_ERROR_VALIDATION_FAILED_EXT;
5962        } else {
5963            std::lock_guard<std::mutex> lock(global_lock);
5964            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
5965        }
5966    } else {
5967        // Need to do anything if pool create fails?
5968    }
5969    return result;
5970}
5971
5972VKAPI_ATTR VkResult VKAPI_CALL
5973ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
5974    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5975    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
5976    if (VK_SUCCESS == result) {
5977        std::lock_guard<std::mutex> lock(global_lock);
5978        clearDescriptorPool(dev_data, device, descriptorPool, flags);
5979    }
5980    return result;
5981}
5982// Ensure the pool contains enough descriptors and descriptor sets to satisfy
5983// an allocation request. Fills common_data with the total number of descriptors of each type required,
5984// as well as DescriptorSetLayout ptrs used for later update.
5985static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5986                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5987    // All state checks for AllocateDescriptorSets is done in single function
5988    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
5989}
5990// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
5991static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
5992                                                 VkDescriptorSet *pDescriptorSets,
5993                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
5994    // All the updates are contained in a single cvdescriptorset function
5995    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
5996                                                   &dev_data->setMap, dev_data);
5997}
5998
5999VKAPI_ATTR VkResult VKAPI_CALL
6000AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6001    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6002    std::unique_lock<std::mutex> lock(global_lock);
6003    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6004    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6005    lock.unlock();
6006
6007    if (skip_call)
6008        return VK_ERROR_VALIDATION_FAILED_EXT;
6009
6010    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6011
6012    if (VK_SUCCESS == result) {
6013        lock.lock();
6014        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6015        lock.unlock();
6016    }
6017    return result;
6018}
6019// Verify state before freeing DescriptorSets
6020static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6021                                              const VkDescriptorSet *descriptor_sets) {
6022    bool skip_call = false;
6023    // First make sure sets being destroyed are not currently in-use
6024    for (uint32_t i = 0; i < count; ++i)
6025        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6026
6027    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6028    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6029        // Can't Free from a NON_FREE pool
6030        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6031                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6032                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6033                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6034    }
6035    return skip_call;
6036}
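
// Hedged illustration (application-side): vkFreeDescriptorSets() is only legal when the
// pool opted in at creation time (maxSets and pool_size are placeholder values):
//
//     VkDescriptorPoolSize pool_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 16};
//     VkDescriptorPoolCreateInfo pool_ci = {};
//     pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
//     pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // enables per-set free
//     pool_ci.maxSets = 16;
//     pool_ci.poolSizeCount = 1;
//     pool_ci.pPoolSizes = &pool_size;
//     VkDescriptorPool pool;
//     vkCreateDescriptorPool(device, &pool_ci, nullptr, &pool);
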
6037// Sets have been removed from the pool so update underlying state
6038static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6039                                             const VkDescriptorSet *descriptor_sets) {
6040    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6041    // Update available descriptor sets in pool
6042    pool_state->availableSets += count;
6043
6044    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6045    for (uint32_t i = 0; i < count; ++i) {
6046        auto set_state = dev_data->setMap[descriptor_sets[i]];
6047        uint32_t type_index = 0, descriptor_count = 0;
6048        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6049            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6050            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6051            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6052        }
6053        freeDescriptorSet(dev_data, set_state);
6054        pool_state->sets.erase(set_state);
6055    }
6056}
6057
6058VKAPI_ATTR VkResult VKAPI_CALL
6059FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6060    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6061    // Make sure that no sets being destroyed are in-flight
6062    std::unique_lock<std::mutex> lock(global_lock);
6063    bool skipCall = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6064    lock.unlock();
6065    if (skipCall)
6066        return VK_ERROR_VALIDATION_FAILED_EXT;
6067    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6068    if (VK_SUCCESS == result) {
6069        lock.lock();
6070        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6071        lock.unlock();
6072    }
6073    return result;
6074}
6075// TODO : This is a Proof-of-concept for core validation architecture
6076//  Really we'll want to break out these functions to separate files but
6077//  keeping it all together here to prove out design
6078// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6079static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6080                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6081                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6082    // First thing to do is perform map look-ups.
6083    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
6084    //  so we can't just do a single map look-up up-front, but do them individually in functions below
6085
6086    // Now make call(s) that validate state, but don't perform state updates in this function
6087    // Note: DescriptorSets is unique here in that we don't yet have a single instance, so we use a helper
6088    //  function in the namespace that parses the params and calls into the specific class instances
6089    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6090                                                         descriptorCopyCount, pDescriptorCopies);
6091}
6092// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6093static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6094                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6095                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6096    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6097                                                 pDescriptorCopies);
6098}
6099
6100VKAPI_ATTR void VKAPI_CALL
6101UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6102                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6103    // Only map look-up at top level is for device-level layer_data
6104    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6105    std::unique_lock<std::mutex> lock(global_lock);
6106    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6107                                                         pDescriptorCopies);
6108    lock.unlock();
6109    if (!skip_call) {
6110        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6111                                                              pDescriptorCopies);
6112        lock.lock();
6113        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6114        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6115                                           pDescriptorCopies);
6116    }
6117}
6118
6119VKAPI_ATTR VkResult VKAPI_CALL
6120AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6121    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6122    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6123    if (VK_SUCCESS == result) {
6124        std::unique_lock<std::mutex> lock(global_lock);
6125        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6126        if (cp_it != dev_data->commandPoolMap.end()) {
6127            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6128                // Add command buffer to its commandPool map
6129                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6130                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6131                // Add command buffer to map
6132                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6133                resetCB(dev_data, pCommandBuffer[i]);
6134                pCB->createInfo = *pCreateInfo;
6135                pCB->device = device;
6136            }
6137        }
6138        printCBList(dev_data);
6139        lock.unlock();
6140    }
6141    return result;
6142}
6143
6144VKAPI_ATTR VkResult VKAPI_CALL
6145BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6146    bool skipCall = false;
6147    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6148    std::unique_lock<std::mutex> lock(global_lock);
6149    // Look up command buffer state; level-specific validation follows below
6150    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6151    if (pCB) {
6152        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6153        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6154            skipCall |=
6155                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6156                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6157                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6158                        "You must check CB fence before this call.",
6159                        commandBuffer);
6160        }
6161        clear_cmd_buf_and_mem_references(dev_data, pCB);
6162        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6163            // Secondary Command Buffer
6164            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6165            if (!pInfo) {
6166                skipCall |=
6167                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6168                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6169                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6170                            reinterpret_cast<void *>(commandBuffer));
6171            } else {
6172                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6173                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6174                        skipCall |= log_msg(
6175                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6176                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6177                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6178                            reinterpret_cast<void *>(commandBuffer));
6179                    }
6180                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6181                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6182                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6183                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6184                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
6185                                                  "valid framebuffer parameter is specified.",
6186                                            reinterpret_cast<void *>(commandBuffer));
6187                    } else {
6188                        string errorString = "";
6189                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6190                        if (framebuffer) {
6191                            VkRenderPass fbRP = framebuffer->createInfo.renderPass;
6192                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6193                                // renderPass that framebuffer was created with must be compatible with local renderPass
6194                                skipCall |=
6195                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6196                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6197                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6198                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6199                                                  "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6200                                                  "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6201                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6202                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6203                            }
6204                            // Connect this framebuffer to this cmdBuffer
6205                            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
6206                        }
6207                    }
6208                }
6209                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6210                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6211                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6212                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6213                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6214                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6215                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6216                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
6217                                        "support precise occlusion queries.",
6218                                        reinterpret_cast<void *>(commandBuffer));
6219                }
6220            }
6221            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6222                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6223                if (renderPass) {
6224                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
6225                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6226                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6227                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6228                                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
6229                                            "that is less than the number of subpasses (%d).",
6230                                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
6231                    }
6232                }
6233            }
6234        }
6235        if (CB_RECORDING == pCB->state) {
6236            skipCall |=
6237                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6238                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6239                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6240                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6241                        (uint64_t)commandBuffer);
6242        } else if (CB_RECORDED == pCB->state || (CB_INVALID == pCB->state && !pCB->cmds.empty() && CMD_END == pCB->cmds.back().type)) {
6243            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6244            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6245                skipCall |=
6246                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6247                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6248                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6249                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6250                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6251                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6252            }
6253            resetCB(dev_data, commandBuffer);
6254        }
6255        // Set updated state here in case implicit reset occurs above
6256        pCB->state = CB_RECORDING;
6257        pCB->beginInfo = *pBeginInfo;
6258        if (pCB->beginInfo.pInheritanceInfo) {
6259            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6260            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6261            // If this is a secondary command buffer that inherits state, update the items we should inherit.
6262            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6263                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
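                // Adopt the render pass, subpass, and framebuffer named in pInheritanceInfo so that subsequent
                // commands in this secondary buffer are validated against the inherited state.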
6264                pCB->activeRenderPass = getRenderPass(dev_data, pCB->beginInfo.pInheritanceInfo->renderPass);
6265                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
6266                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
6267            }
6268        }
6269    } else {
6270        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6271                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6272                            "In vkBeginCommandBuffer(): unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
6273    }
6274    lock.unlock();
6275    if (skipCall) {
6276        return VK_ERROR_VALIDATION_FAILED_EXT;
6277    }
6278    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6279
6280    return result;
6281}
6282
6283VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6284    bool skipCall = false;
6285    VkResult result = VK_SUCCESS;
6286    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6287    std::unique_lock<std::mutex> lock(global_lock);
6288    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6289    if (pCB) {
6290        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6291            // This needs spec clarification to update valid usage, see comments in PR:
6292            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6293            skipCall |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6294        }
6295        skipCall |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6296        for (auto query : pCB->activeQueries) {
6297            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6298                                DRAWSTATE_INVALID_QUERY, "DS",
6299                                "Ending command buffer with in-progress query: queryPool 0x%" PRIx64 ", index %d",
6300                                (uint64_t)(query.pool), query.index);
6301        }
6302    }
6303    if (!skipCall) {
6304        lock.unlock();
6305        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6306        lock.lock();
6307        if (VK_SUCCESS == result) {
6308            pCB->state = CB_RECORDED;
6309            // Reset CB status flags
6310            pCB->status = 0;
6311            printCB(dev_data, commandBuffer);
6312        }
6313    } else {
6314        result = VK_ERROR_VALIDATION_FAILED_EXT;
6315    }
6316    lock.unlock();
6317    return result;
6318}
6319
6320VKAPI_ATTR VkResult VKAPI_CALL
6321ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6322    bool skip_call = false;
6323    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6324    std::unique_lock<std::mutex> lock(global_lock);
6325    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6326    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6327    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6328        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6329                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6330                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6331                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6332                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6333    }
6334    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
6335    lock.unlock();
6336    if (skip_call)
6337        return VK_ERROR_VALIDATION_FAILED_EXT;
6338    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6339    if (VK_SUCCESS == result) {
6340        lock.lock();
6341        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
6342        resetCB(dev_data, commandBuffer);
6343        lock.unlock();
6344    }
6345    return result;
6346}
6347
6348VKAPI_ATTR void VKAPI_CALL
6349CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6350    bool skipCall = false;
6351    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6352    std::unique_lock<std::mutex> lock(global_lock);
6353    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6354    if (pCB) {
6355        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
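        // Compute pipelines may not be bound while a render pass instance is active; only graphics work is
        // permitted inside a render pass.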
6356        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6357            skipCall |=
6358                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6359                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6360                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6361                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6362        }
6363
6364        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6365        if (pPN) {
6366            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6367            set_cb_pso_status(pCB, pPN);
6368            set_pipeline_state(pPN);
6369        } else {
6370            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6371                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6372                                "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6373        }
6374    }
6375    lock.unlock();
6376    if (!skipCall)
6377        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6378}
6379
6380VKAPI_ATTR void VKAPI_CALL
6381CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6382    bool skipCall = false;
6383    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6384    std::unique_lock<std::mutex> lock(global_lock);
6385    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6386    if (pCB) {
6387        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6388        pCB->status |= CBSTATUS_VIEWPORT_SET;
6389        pCB->viewports.resize(viewportCount);
6390        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6391    }
6392    lock.unlock();
6393    if (!skipCall)
6394        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6395}
6396
6397VKAPI_ATTR void VKAPI_CALL
6398CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6399    bool skipCall = false;
6400    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6401    std::unique_lock<std::mutex> lock(global_lock);
6402    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6403    if (pCB) {
6404        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6405        pCB->status |= CBSTATUS_SCISSOR_SET;
6406        pCB->scissors.resize(scissorCount);
6407        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6408    }
6409    lock.unlock();
6410    if (!skipCall)
6411        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6412}
6413
6414VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6415    bool skip_call = false;
6416    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6417    std::unique_lock<std::mutex> lock(global_lock);
6418    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6419    if (pCB) {
6420        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6421        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6422
6423        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6424        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6425            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6426                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6427                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
6428                                 "flag. This is undefined behavior and the command may be ignored.");
6429        } else {
6430            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6431        }
6432    }
6433    lock.unlock();
6434    if (!skip_call)
6435        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6436}
6437
6438VKAPI_ATTR void VKAPI_CALL
6439CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6440    bool skipCall = false;
6441    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6442    std::unique_lock<std::mutex> lock(global_lock);
6443    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6444    if (pCB) {
6445        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6446        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6447    }
6448    lock.unlock();
6449    if (!skipCall)
6450        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6451                                                         depthBiasSlopeFactor);
6452}
6453
6454VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6455    bool skipCall = false;
6456    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6457    std::unique_lock<std::mutex> lock(global_lock);
6458    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6459    if (pCB) {
6460        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6461        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6462    }
6463    lock.unlock();
6464    if (!skipCall)
6465        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6466}
6467
6468VKAPI_ATTR void VKAPI_CALL
6469CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6470    bool skipCall = false;
6471    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6472    std::unique_lock<std::mutex> lock(global_lock);
6473    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6474    if (pCB) {
6475        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6476        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6477    }
6478    lock.unlock();
6479    if (!skipCall)
6480        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6481}
6482
6483VKAPI_ATTR void VKAPI_CALL
6484CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6485    bool skipCall = false;
6486    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6487    std::unique_lock<std::mutex> lock(global_lock);
6488    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6489    if (pCB) {
6490        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6491        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6492    }
6493    lock.unlock();
6494    if (!skipCall)
6495        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6496}
6497
6498VKAPI_ATTR void VKAPI_CALL
6499CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6500    bool skipCall = false;
6501    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6502    std::unique_lock<std::mutex> lock(global_lock);
6503    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6504    if (pCB) {
6505        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6506        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6507    }
6508    lock.unlock();
6509    if (!skipCall)
6510        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6511}
6512
6513VKAPI_ATTR void VKAPI_CALL
6514CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6515    bool skipCall = false;
6516    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6517    std::unique_lock<std::mutex> lock(global_lock);
6518    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6519    if (pCB) {
6520        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6521        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6522    }
6523    lock.unlock();
6524    if (!skipCall)
6525        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6526}
6527
6528VKAPI_ATTR void VKAPI_CALL
6529CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6530                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6531                      const uint32_t *pDynamicOffsets) {
6532    bool skipCall = false;
6533    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6534    std::unique_lock<std::mutex> lock(global_lock);
6535    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6536    if (pCB) {
6537        if (pCB->state == CB_RECORDING) {
6538            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6539            uint32_t totalDynamicDescriptors = 0;
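            // Offsets in pDynamicOffsets are consumed in order: by set, and within each set by binding. For example,
            // binding set 0 with one dynamic uniform buffer and set 1 with two dynamic storage buffers requires
            // dynamicOffsetCount == 3.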
6540            string errorString = "";
6541            uint32_t lastSetIndex = firstSet + setCount - 1;
6542            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6543                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6544                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6545            }
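            // Save the set previously bound at the highest index touched by this call; it's used below to decide
            // whether any higher-numbered sets are disturbed by the new pipeline layout.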
6546            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6547            for (uint32_t i = 0; i < setCount; i++) {
6548                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6549                if (pSet) {
6550                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6551                    pSet->BindCommandBuffer(pCB);
6552                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6553                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6554                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6555                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6556                                        DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
6557                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6558                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6559                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6560                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6561                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6562                                            "DS 0x%" PRIxLEAST64
6563                                            " bound but it was never updated. You may want to either update it or not bind it.",
6564                                            (uint64_t)pDescriptorSets[i]);
6565                    }
6566                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6567                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6568                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6569                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6570                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6571                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6572                                            "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
6573                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
6574                    }
6575
6576                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
6577
6578                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
6579
6580                    if (setDynamicDescriptorCount) {
6581                        // First make sure we won't overstep bounds of pDynamicOffsets array
6582                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
6583                            skipCall |=
6584                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6585                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6586                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6587                                        "descriptorSet #%u (0x%" PRIxLEAST64
6588                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6589                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6590                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
6591                                        (dynamicOffsetCount - totalDynamicDescriptors));
6592                        } else { // Validate and store dynamic offsets with the set
6593                            // Validate Dynamic Offset Minimums
6594                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
6595                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
6596                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6597                                    if (vk_safe_modulo(
6598                                            pDynamicOffsets[cur_dyn_offset],
6599                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6600                                        skipCall |= log_msg(
6601                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6602                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6603                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6604                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
6605                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
6606                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6607                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6608                                    }
6609                                    cur_dyn_offset++;
6610                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6611                                    if (vk_safe_modulo(
6612                                            pDynamicOffsets[cur_dyn_offset],
6613                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6614                                        skipCall |= log_msg(
6615                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6616                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6617                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6618                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
6619                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
6620                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6621                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6622                                    }
6623                                    cur_dyn_offset++;
6624                                }
6625                            }
6626
6627                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
6628                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
6629                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
6630                            // Keep running total of dynamic descriptor count to verify at the end
6631                            totalDynamicDescriptors += setDynamicDescriptorCount;
6632
6633                        }
6634                    }
6635                } else {
6636                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6637                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6638                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
6639                                        (uint64_t)pDescriptorSets[i]);
6640                }
6641                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6642                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6643                if (firstSet > 0) { // Check set #s below the first bound set
6644                    for (uint32_t i = 0; i < firstSet; ++i) {
6645                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
6646                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
6647                                                             layout, i, errorString)) {
6648                            skipCall |= log_msg(
6649                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6650                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6651                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
6652                                "DescriptorSet 0x%" PRIxLEAST64
6653                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6654                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
6655                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
6656                        }
6657                    }
6658                }
6659                // Check if newly last bound set invalidates any remaining bound sets
6660                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
6661                    if (oldFinalBoundSet &&
6662                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
6663                        auto old_set = oldFinalBoundSet->GetSet();
6664                        skipCall |=
6665                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6666                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
6667                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
6668                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
6669                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
6670                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6671                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
6672                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
6673                                    lastSetIndex + 1, (uint64_t)layout);
6674                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6675                    }
6676                }
6677            }
6678            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6679            if (totalDynamicDescriptors != dynamicOffsetCount) {
6680                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6681                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6682                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6683                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
6684                                    "is %u. It should exactly match the number of dynamic descriptors.",
6685                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
6686            }
6687        } else {
6688            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
6689        }
6690    }
6691    lock.unlock();
6692    if (!skipCall)
6693        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6694                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6695}
6696
6697VKAPI_ATTR void VKAPI_CALL
6698CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
6699    bool skipCall = false;
6700    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6701    // TODO : Somewhere need to verify that IBs have correct usage state flagged
6702    std::unique_lock<std::mutex> lock(global_lock);
6703    VkDeviceMemory mem;
6704    skipCall =
6705        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6706    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6707    if (cb_data != dev_data->commandBufferMap.end()) {
6708        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
6709        cb_data->second->validate_functions.push_back(function);
6710        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6711        VkDeviceSize offset_align = 0;
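        // offset must be a multiple of the index size: 2 bytes for VK_INDEX_TYPE_UINT16, 4 bytes for
        // VK_INDEX_TYPE_UINT32 (e.g. offset 6 with UINT32 fails the check below, since 6 % 4 != 0).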
6712        switch (indexType) {
6713        case VK_INDEX_TYPE_UINT16:
6714            offset_align = 2;
6715            break;
6716        case VK_INDEX_TYPE_UINT32:
6717            offset_align = 4;
6718            break;
6719        default:
6720            // ParamChecker should catch a bad enum; the alignment check below also fires if offset_align stays 0
6721            break;
6722        }
6723        if (!offset_align || (offset % offset_align)) {
6724            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6725                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
6726                                "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not aligned to the index size of indexType %s.",
6727                                offset, string_VkIndexType(indexType));
6728        }
6729        cb_data->second->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6730    }
6731    lock.unlock();
6732    if (!skipCall)
6733        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6734}
6735
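// Record the buffers occupying vertex bindings [firstBinding, firstBinding + bindingCount) in the CB's current
// draw data, growing the tracking vector as needed.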
6736void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6737    uint32_t end = firstBinding + bindingCount;
6738    if (pCB->currentDrawData.buffers.size() < end) {
6739        pCB->currentDrawData.buffers.resize(end);
6740    }
6741    for (uint32_t i = 0; i < bindingCount; ++i) {
6742        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6743    }
6744}
6745
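// Each draw snapshots the currently bound vertex buffers so that submit-time validation can inspect the
// bindings used by every draw in the command buffer.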
6746static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6747
6748VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
6749                                                uint32_t bindingCount, const VkBuffer *pBuffers,
6750                                                const VkDeviceSize *pOffsets) {
6751    bool skipCall = false;
6752    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6753    // TODO : Somewhere need to verify that VBs have correct usage state flagged
6754    std::unique_lock<std::mutex> lock(global_lock);
6755    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6756    if (cb_data != dev_data->commandBufferMap.end()) {
6757        for (uint32_t i = 0; i < bindingCount; ++i) {
6758            VkDeviceMemory mem;
6759            skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6760
6761            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
6762            cb_data->second->validate_functions.push_back(function);
6763        }
6764        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
6765        updateResourceTracking(cb_data->second, firstBinding, bindingCount, pBuffers);
6766    } else {
6767        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
6768    }
6769    lock.unlock();
6770    if (!skipCall)
6771        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6772}
6773
6774/* expects global_lock to be held by caller */
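// Rather than checking immediately, queue lambdas on the CB's validate_functions list: when the buffer is
// submitted, the storage images and buffers written by the bound pipeline are marked as holding valid data.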
6775static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6776    bool skip_call = false;
6777
6778    for (auto imageView : pCB->updateImages) {
6779        auto iv_data = getImageViewData(dev_data, imageView);
6780        if (!iv_data)
6781            continue;
6782        VkImage image = iv_data->image;
6783        VkDeviceMemory mem;
6784        skip_call |=
6785            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
6786        std::function<bool()> function = [=]() {
6787            set_memory_valid(dev_data, mem, true, image);
6788            return false;
6789        };
6790        pCB->validate_functions.push_back(function);
6791    }
6792    for (auto buffer : pCB->updateBuffers) {
6793        VkDeviceMemory mem;
6794        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
6795                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6796        std::function<bool()> function = [=]() {
6797            set_memory_valid(dev_data, mem, true);
6798            return false;
6799        };
6800        pCB->validate_functions.push_back(function);
6801    }
6802    return skip_call;
6803}
6804
6805VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6806                                   uint32_t firstVertex, uint32_t firstInstance) {
6807    bool skipCall = false;
6808    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6809    std::unique_lock<std::mutex> lock(global_lock);
6810    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6811    if (pCB) {
6812        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
6813        pCB->drawCount[DRAW]++;
6814        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6815        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6816        // TODO : Need to pass commandBuffer as srcObj here
6817        skipCall |=
6818            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6819                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
6820        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6821        if (!skipCall) {
6822            updateResourceTrackingOnDraw(pCB);
6823        }
6824        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
6825    }
6826    lock.unlock();
6827    if (!skipCall)
6828        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6829}
6830
6831VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
6832                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
6833                                                            uint32_t firstInstance) {
6834    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6835    bool skipCall = false;
6836    std::unique_lock<std::mutex> lock(global_lock);
6837    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6838    if (pCB) {
6839        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
6840        pCB->drawCount[DRAW_INDEXED]++;
6841        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6842        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6843        // TODO : Need to pass commandBuffer as srcObj here
6844        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6845                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6846                            "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
6847        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6848        if (!skipCall) {
6849            updateResourceTrackingOnDraw(pCB);
6850        }
6851        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
6852    }
6853    lock.unlock();
6854    if (!skipCall)
6855        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
6856                                                        firstInstance);
6857}
6858
6859VKAPI_ATTR void VKAPI_CALL
6860CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6861    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6862    bool skipCall = false;
6863    std::unique_lock<std::mutex> lock(global_lock);
6864    VkDeviceMemory mem;
6865    // MTMTODO : merge with code below
6866    skipCall =
6867        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6868    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
6869    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6870    if (pCB) {
6871        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
6872        pCB->drawCount[DRAW_INDIRECT]++;
6873        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6874        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6875        // TODO : Need to pass commandBuffer as srcObj here
6876        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6877                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6878                            "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
6879        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6880        if (!skipCall) {
6881            updateResourceTrackingOnDraw(pCB);
6882        }
6883        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
6884    }
6885    lock.unlock();
6886    if (!skipCall)
6887        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6888}
6889
6890VKAPI_ATTR void VKAPI_CALL
6891CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6892    bool skipCall = false;
6893    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6894    std::unique_lock<std::mutex> lock(global_lock);
6895    VkDeviceMemory mem;
6896    // MTMTODO : merge with code below
6897    skipCall =
6898        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6899    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
6900    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6901    if (pCB) {
6902        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
6903        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
6904        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6905        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6906        // TODO : Need to pass commandBuffer as srcObj here
6907        skipCall |=
6908            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6909                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
6910                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
6911        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6912        if (!skipCall) {
6913            updateResourceTrackingOnDraw(pCB);
6914        }
6915        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
6916    }
6917    lock.unlock();
6918    if (!skipCall)
6919        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6920}
6921
6922VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6923    bool skipCall = false;
6924    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6925    std::unique_lock<std::mutex> lock(global_lock);
6926    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6927    if (pCB) {
6928        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6929        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6930        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
6931        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
6932    }
6933    lock.unlock();
6934    if (!skipCall)
6935        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
6936}
6937
6938VKAPI_ATTR void VKAPI_CALL
6939CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6940    bool skipCall = false;
6941    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6942    std::unique_lock<std::mutex> lock(global_lock);
6943    VkDeviceMemory mem;
6944    skipCall =
6945        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6946    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
6947    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6948    if (pCB) {
6949        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6950        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6951        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
6952        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
6953    }
6954    lock.unlock();
6955    if (!skipCall)
6956        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
6957}
6958
6959VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6960                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
6961    bool skipCall = false;
6962    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6963    std::unique_lock<std::mutex> lock(global_lock);
6964    VkDeviceMemory src_mem, dst_mem;
6965    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
6966    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBuffer");
6967    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
6968
6969    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBuffer");
6970    // Validate that SRC & DST buffers have correct usage flags set
6971    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
6972                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
6973    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
6974                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6975    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6976    if (cb_data != dev_data->commandBufferMap.end()) {
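        // Queue submit-time checks: the source allocation must contain valid data when the copy executes, and the
        // copy itself marks the destination allocation valid.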
6977        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); };
6978        cb_data->second->validate_functions.push_back(function);
6979        function = [=]() {
6980            set_memory_valid(dev_data, dst_mem, true);
6981            return false;
6982        };
6983        cb_data->second->validate_functions.push_back(function);
6984
6985        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
6986        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBuffer");
6987    }
6988    lock.unlock();
6989    if (!skipCall)
6990        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
6991}
6992
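// Compare the caller-supplied source layout against the layout this command buffer last recorded for each
// subresource; a subresource not seen before is trusted and its layout recorded as-is.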
6993static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
6994                                    VkImageLayout srcImageLayout) {
6995    bool skip_call = false;
6996
6997    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
6998    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
6999    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7000        uint32_t layer = i + subLayers.baseArrayLayer;
7001        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7002        IMAGE_CMD_BUF_LAYOUT_NODE node;
7003        if (!FindLayout(pCB, srcImage, sub, node)) {
7004            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7005            continue;
7006        }
7007        if (node.layout != srcImageLayout) {
7008            // TODO: Improve log message in the next pass
7009            skip_call |=
7010                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7011                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7012                                                                        "when its current layout is %s.",
7013                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7014        }
7015    }
7016    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7017        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7018            // LAYOUT_GENERAL is allowed, but may not be optimal for performance; flag it as a perf warning.
7019            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7020                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7021                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7022        } else {
7023            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7024                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7025                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7026                                 string_VkImageLayout(srcImageLayout));
7027        }
7028    }
7029    return skip_call;
7030}
7031
7032static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7033                                  VkImageLayout destImageLayout) {
7034    bool skip_call = false;
7035
7036    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7037    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7038    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7039        uint32_t layer = i + subLayers.baseArrayLayer;
7040        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7041        IMAGE_CMD_BUF_LAYOUT_NODE node;
7042        if (!FindLayout(pCB, destImage, sub, node)) {
7043            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7044            continue;
7045        }
7046        if (node.layout != destImageLayout) {
7047            skip_call |=
7048                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7049                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s "
7050                                                                        "when its current layout is %s.",
7051                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7052        }
7053    }
7054    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7055        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7056            // LAYOUT_GENERAL is allowed, but may not be optimal for performance; flag it as a perf warning.
7057            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7058                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7059                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7060        } else {
7061            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7062                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7063                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7064                                 string_VkImageLayout(destImageLayout));
7065        }
7066    }
7067    return skip_call;
7068}
7069
7070VKAPI_ATTR void VKAPI_CALL
7071CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7072             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7073    bool skipCall = false;
7074    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7075    std::unique_lock<std::mutex> lock(global_lock);
7076    VkDeviceMemory src_mem, dst_mem;
7077    // Validate that src & dst images have correct usage flags set
7078    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7079    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImage");
7080
7081    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7082    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImage");
7083    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7084                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7085    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7086                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7087    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7088    if (cb_data != dev_data->commandBufferMap.end()) {
7089        std::function<bool()> function = [=]() {
7090            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImage()", srcImage);
7091        };
7092        cb_data->second->validate_functions.push_back(function);
7093        function = [=]() {
7094            set_memory_valid(dev_data, dst_mem, true, dstImage);
7095            return false;
7096        };
7097        cb_data->second->validate_functions.push_back(function);
7098
7099        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGE, "vkCmdCopyImage()");
7100        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImage");
7101        for (uint32_t i = 0; i < regionCount; ++i) {
7102            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7103            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7104        }
7105    }
7106    lock.unlock();
7107    if (!skipCall)
7108        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7109                                                      regionCount, pRegions);
7110}
7111
7112VKAPI_ATTR void VKAPI_CALL
7113CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7114             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7115    bool skipCall = false;
7116    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7117    std::unique_lock<std::mutex> lock(global_lock);
7118    VkDeviceMemory src_mem, dst_mem;
7119    // Validate that src & dst images have correct usage flags set
7120    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7121    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdBlitImage");
7122
7123    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7124    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdBlitImage");
7125    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7126                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7127    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7128                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7129
7130    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7131    if (cb_data != dev_data->commandBufferMap.end()) {
7132        std::function<bool()> function = [=]() {
7133            return validate_memory_is_valid(dev_data, src_mem, "vkCmdBlitImage()", srcImage);
7134        };
7135        cb_data->second->validate_functions.push_back(function);
7136        function = [=]() {
7137            set_memory_valid(dev_data, dst_mem, true, dstImage);
7138            return false;
7139        };
7140        cb_data->second->validate_functions.push_back(function);
7141
7142        skipCall |= addCmd(dev_data, cb_data->second, CMD_BLITIMAGE, "vkCmdBlitImage()");
7143        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdBlitImage");
7144    }
7145    lock.unlock();
7146    if (!skipCall)
7147        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7148                                                      regionCount, pRegions, filter);
7149}
7150
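// Validate vkCmdCopyBufferToImage: src buffer needs TRANSFER_SRC usage and dst image needs TRANSFER_DST
// usage; each region's dst image layout is also verified before the call is passed down the chain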
7151VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7152                                                VkImage dstImage, VkImageLayout dstImageLayout,
7153                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7154    bool skipCall = false;
7155    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7156    std::unique_lock<std::mutex> lock(global_lock);
7157    VkDeviceMemory dst_mem, src_mem;
7158    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7159    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBufferToImage");
7160
7161    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
7162    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBufferToImage");
7163    // Validate that src buff & dst image have correct usage flags set
7164    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()",
7165                                            "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7166    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()",
7167                                           "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7168    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7169    if (cb_data != dev_data->commandBufferMap.end()) {
7170        std::function<bool()> function = [=]() {
7171            set_memory_valid(dev_data, dst_mem, true, dstImage);
7172            return false;
7173        };
7174        cb_data->second->validate_functions.push_back(function);
7175        function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBufferToImage()"); };
7176        cb_data->second->validate_functions.push_back(function);
7177
7178        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7179        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBufferToImage");
7180        for (uint32_t i = 0; i < regionCount; ++i) {
7181            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7182        }
7183    }
7184    lock.unlock();
7185    if (!skipCall)
7186        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7187                                                              pRegions);
7188}
7189
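// Validate vkCmdCopyImageToBuffer: mirror of CopyBufferToImage, with the src image layout verified per-region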
7190VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7191                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7192                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7193    bool skipCall = false;
7194    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7195    std::unique_lock<std::mutex> lock(global_lock);
7196    VkDeviceMemory src_mem, dst_mem;
7197    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7198    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImageToBuffer");
7199
7200    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
7201    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImageToBuffer");
7202    // Validate that dst buff & src image have correct usage flags set
7203    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()",
7204                                           "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7205    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()",
7206                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7207
7208    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7209    if (cb_data != dev_data->commandBufferMap.end()) {
7210        std::function<bool()> function = [=]() {
7211            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImageToBuffer()", srcImage);
7212        };
7213        cb_data->second->validate_functions.push_back(function);
7214        function = [=]() {
7215            set_memory_valid(dev_data, dst_mem, true);
7216            return false;
7217        };
7218        cb_data->second->validate_functions.push_back(function);
7219
7220        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7221        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImageToBuffer");
7222        for (uint32_t i = 0; i < regionCount; ++i) {
7223            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7224        }
7225    }
7226    lock.unlock();
7227    if (!skipCall)
7228        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7229                                                              pRegions);
7230}
7231
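// Validate vkCmdUpdateBuffer: dst buffer must be bound to memory and have TRANSFER_DST usage; the
// deferred callback marks dst memory valid since the update writes it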
7232VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7233                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7234    bool skipCall = false;
7235    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7236    std::unique_lock<std::mutex> lock(global_lock);
7237    VkDeviceMemory mem;
7238    skipCall =
7239        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7240    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7241    // Validate that dst buff has correct usage flags set
7242    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()",
7243                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7244
7245    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7246    if (cb_data != dev_data->commandBufferMap.end()) {
7247        std::function<bool()> function = [=]() {
7248            set_memory_valid(dev_data, mem, true);
7249            return false;
7250        };
7251        cb_data->second->validate_functions.push_back(function);
7252
7253        skipCall |= addCmd(dev_data, cb_data->second, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdUpdateBuffer");
7255    }
7256    lock.unlock();
7257    if (!skipCall)
7258        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7259}
7260
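// Validate vkCmdFillBuffer: same dst-buffer binding and usage checks as vkCmdUpdateBuffer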
7261VKAPI_ATTR void VKAPI_CALL
7262CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7263    bool skipCall = false;
7264    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7265    std::unique_lock<std::mutex> lock(global_lock);
7266    VkDeviceMemory mem;
7267    skipCall =
7268        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7269    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7270    // Validate that dst buff has correct usage flags set
7271    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
7272                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7273
7274    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7275    if (cb_data != dev_data->commandBufferMap.end()) {
7276        std::function<bool()> function = [=]() {
7277            set_memory_valid(dev_data, mem, true);
7278            return false;
7279        };
7280        cb_data->second->validate_functions.push_back(function);
7281
7282        skipCall |= addCmd(dev_data, cb_data->second, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdFillBuffer");
7284    }
7285    lock.unlock();
7286    if (!skipCall)
7287        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7288}
7289
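// Validate vkCmdClearAttachments: must be issued inside a render pass, and every cleared attachment
// must be referenced by the active subpass; also warn on full clears issued before any draw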
7290VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7291                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7292                                               const VkClearRect *pRects) {
7293    bool skipCall = false;
7294    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7295    std::unique_lock<std::mutex> lock(global_lock);
7296    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7297    if (pCB) {
7298        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to any Draw Cmd and is clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are times when an app legitimately needs to call vkCmdClearAttachments (generally when reusing an
            // attachment within a render pass). TODO : Make this warning more specific so it is not triggered for uses
            // that require CmdClearAttachments; until then, treat it as a performance warning.
7307            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7308                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7309                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7310                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7311                                (uint64_t)(commandBuffer));
7312        }
7313        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7314    }
7315
    // Validate that each cleared attachment is in the reference list of the active subpass
    if (pCB && pCB->activeRenderPass) {
7318        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
7319        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7320
7321        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7322            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7323            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7324                bool found = false;
7325                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7326                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7327                        found = true;
7328                        break;
7329                    }
7330                }
7331                if (!found) {
7332                    skipCall |= log_msg(
7333                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7334                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7335                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7336                        attachment->colorAttachment, pCB->activeSubpass);
7337                }
7338            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                // Either case means no depth/stencil attachment will be used in the active subpass
                if (!pSD->pDepthStencilAttachment ||
                    (pSD->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
7343                    skipCall |= log_msg(
7344                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7345                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7346                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7347                        "in active subpass %d",
7348                        attachment->colorAttachment,
7349                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7350                        pCB->activeSubpass);
7351                }
7352            }
7353        }
7354    }
7355    lock.unlock();
7356    if (!skipCall)
7357        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7358}
7359
7360VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7361                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
7362                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7363    bool skipCall = false;
7364    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7365    std::unique_lock<std::mutex> lock(global_lock);
7366    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7367    VkDeviceMemory mem;
7368    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7369    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7370    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7371    if (cb_data != dev_data->commandBufferMap.end()) {
7372        std::function<bool()> function = [=]() {
7373            set_memory_valid(dev_data, mem, true, image);
7374            return false;
7375        };
7376        cb_data->second->validate_functions.push_back(function);
7377
7378        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7379        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearColorImage");
7380    }
7381    lock.unlock();
7382    if (!skipCall)
7383        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7384}
7385
7386VKAPI_ATTR void VKAPI_CALL
7387CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7388                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7389                          const VkImageSubresourceRange *pRanges) {
7390    bool skipCall = false;
7391    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7392    std::unique_lock<std::mutex> lock(global_lock);
7393    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7394    VkDeviceMemory mem;
7395    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7396    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7397    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7398    if (cb_data != dev_data->commandBufferMap.end()) {
7399        std::function<bool()> function = [=]() {
7400            set_memory_valid(dev_data, mem, true, image);
7401            return false;
7402        };
7403        cb_data->second->validate_functions.push_back(function);
7404
7405        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7406        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearDepthStencilImage");
7407    }
7408    lock.unlock();
7409    if (!skipCall)
7410        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7411                                                                   pRanges);
7412}
7413
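// Validate vkCmdResolveImage: src memory must be valid at submit time, and dst memory is marked valid
// by the resolve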
7414VKAPI_ATTR void VKAPI_CALL
7415CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7416                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7417    bool skipCall = false;
7418    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7419    std::unique_lock<std::mutex> lock(global_lock);
7420    VkDeviceMemory src_mem, dst_mem;
7421    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7422    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdResolveImage");
7423
7424    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7425    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdResolveImage");
7426    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7427    if (cb_data != dev_data->commandBufferMap.end()) {
7428        std::function<bool()> function = [=]() {
7429            return validate_memory_is_valid(dev_data, src_mem, "vkCmdResolveImage()", srcImage);
7430        };
7431        cb_data->second->validate_functions.push_back(function);
7432        function = [=]() {
7433            set_memory_valid(dev_data, dst_mem, true, dstImage);
7434            return false;
7435        };
7436        cb_data->second->validate_functions.push_back(function);
7437
7438        skipCall |= addCmd(dev_data, cb_data->second, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7439        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdResolveImage");
7440    }
7441    lock.unlock();
7442    if (!skipCall)
7443        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7444                                                         regionCount, pRegions);
7445}
7446
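// Deferred (queue-time) callback: record the stageMask an event was last set with in both the command
// buffer's and the queue's eventToStageMap. Always returns false, i.e. never causes a skip.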
7447bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7448    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7449    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7450    if (pCB) {
7451        pCB->eventToStageMap[event] = stageMask;
7452    }
7453    auto queue_data = dev_data->queueMap.find(queue);
7454    if (queue_data != dev_data->queueMap.end()) {
7455        queue_data->second.eventToStageMap[event] = stageMask;
7456    }
7457    return false;
7458}
7459
7460VKAPI_ATTR void VKAPI_CALL
7461CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7462    bool skipCall = false;
7463    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7464    std::unique_lock<std::mutex> lock(global_lock);
7465    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7466    if (pCB) {
7467        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7468        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7469        pCB->events.push_back(event);
7470        if (!pCB->waitedEvents.count(event)) {
7471            pCB->writeEventsBeforeWait.push_back(event);
7472        }
7473        std::function<bool(VkQueue)> eventUpdate =
7474            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7475        pCB->eventUpdates.push_back(eventUpdate);
7476    }
7477    lock.unlock();
7478    if (!skipCall)
7479        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7480}
7481
7482VKAPI_ATTR void VKAPI_CALL
7483CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7484    bool skipCall = false;
7485    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7486    std::unique_lock<std::mutex> lock(global_lock);
7487    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7488    if (pCB) {
7489        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7490        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
7491        pCB->events.push_back(event);
7492        if (!pCB->waitedEvents.count(event)) {
7493            pCB->writeEventsBeforeWait.push_back(event);
7494        }
7495        std::function<bool(VkQueue)> eventUpdate =
7496            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7497        pCB->eventUpdates.push_back(eventUpdate);
7498    }
7499    lock.unlock();
7500    if (!skipCall)
7501        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
7502}
7503
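// Record the layout transitions requested by image memory barriers into the cmd buffer's per-subresource
// layout map, and flag transitions whose oldLayout does not match the currently tracked layout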
7504static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7505                                   const VkImageMemoryBarrier *pImgMemBarriers) {
7506    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7507    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7508    bool skip = false;
7509    uint32_t levelCount = 0;
7510    uint32_t layerCount = 0;
7511
7512    for (uint32_t i = 0; i < memBarrierCount; ++i) {
7513        auto mem_barrier = &pImgMemBarriers[i];
7514        if (!mem_barrier)
7515            continue;
7516        // TODO: Do not iterate over every possibility - consolidate where
7517        // possible
7518        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
7519
7520        for (uint32_t j = 0; j < levelCount; j++) {
7521            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
7522            for (uint32_t k = 0; k < layerCount; k++) {
7523                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
7524                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
7525                IMAGE_CMD_BUF_LAYOUT_NODE node;
7526                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
7527                    SetLayout(pCB, mem_barrier->image, sub,
7528                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
7529                    continue;
7530                }
7531                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7532                    // TODO: Set memory invalid which is in mem_tracker currently
7533                } else if (node.layout != mem_barrier->oldLayout) {
7534                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7535                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
7536                                                                                    "when current layout is %s.",
7537                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
7538                }
7539                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
7540            }
7541        }
7542    }
7543    return skip;
7544}
7545
7546// Print readable FlagBits in FlagMask
7547static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
7548    std::string result;
7549    std::string separator;
7550
7551    if (accessMask == 0) {
7552        result = "[None]";
7553    } else {
7554        result = "[";
        for (uint32_t i = 0; i < 32; i++) {
            // Use an unsigned shift so testing bit 31 is well-defined
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
7558                separator = " | ";
7559            }
7560        }
7561        result = result + "]";
7562    }
7563    return result;
7564}
7565
7566// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
7567// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
7568// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
7569static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7570                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
7571                             const char *type) {
7572    bool skip_call = false;
7573
7574    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
7575        if (accessMask & ~(required_bit | optional_bits)) {
7576            // TODO: Verify against Valid Use
7577            skip_call |=
7578                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7579                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7580                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7581        }
7582    } else {
7583        if (!required_bit) {
7584            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7585                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
7586                                                                  "%s when layout is %s, unless the app has previously added a "
7587                                                                  "barrier for this transition.",
7588                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
7589                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
7590        } else {
7591            std::string opt_bits;
7592            if (optional_bits != 0) {
7593                std::stringstream ss;
7594                ss << optional_bits;
7595                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
7596            }
7597            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7598                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
7599                                                                  "layout is %s, unless the app has previously added a barrier for "
7600                                                                  "this transition.",
7601                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
7602                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
7603        }
7604    }
7605    return skip_call;
7606}
7607
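// Map each image layout onto the access bits expected in a barrier's access mask and defer the actual
// check to ValidateMaskBits. For example, per the switch below a transition to TRANSFER_DST_OPTIMAL is
// expected to carry VK_ACCESS_TRANSFER_WRITE_BIT, while COLOR_ATTACHMENT_OPTIMAL requires
// COLOR_ATTACHMENT_WRITE_BIT with COLOR_ATTACHMENT_READ_BIT optional. GENERAL and any unhandled layout
// are not checked.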
7608static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7609                                        const VkImageLayout &layout, const char *type) {
7610    bool skip_call = false;
7611    switch (layout) {
7612    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
7613        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
7614                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
7615        break;
7616    }
7617    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
7618        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
7619                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
7620        break;
7621    }
7622    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
7623        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
7624        break;
7625    }
7626    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
7627        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
7628        break;
7629    }
7630    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
7631        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7632                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7633        break;
7634    }
7635    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
7636        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7637                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7638        break;
7639    }
7640    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
7641        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
7642        break;
7643    }
7644    case VK_IMAGE_LAYOUT_UNDEFINED: {
7645        if (accessMask != 0) {
7646            // TODO: Verify against Valid Use section spec
7647            skip_call |=
7648                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7649                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7650                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7651        }
7652        break;
7653    }
7654    case VK_IMAGE_LAYOUT_GENERAL:
7655    default: { break; }
7656    }
7657    return skip_call;
7658}
7659
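// Core barrier validation shared by vkCmdWaitEvents and vkCmdPipelineBarrier: checks that in-render-pass
// barriers have a subpass self-dependency, that queue family indices obey the image's sharing mode, that
// access masks are plausible for the old/new layouts, that subresource ranges fit the image's layer and
// mip counts, and that buffer barrier offset/size fit within the buffer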
7660static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7661                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7662                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7663                             const VkImageMemoryBarrier *pImageMemBarriers) {
7664    bool skip_call = false;
7665    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7666    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7667    if (pCB->activeRenderPass && memBarrierCount) {
7668        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7669            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7670                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
7671                                                                  "with no self dependency specified.",
7672                                 funcName, pCB->activeSubpass);
7673        }
7674    }
7675    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7676        auto mem_barrier = &pImageMemBarriers[i];
7677        auto image_data = getImageNode(dev_data, mem_barrier->image);
7678        if (image_data) {
7679            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7680            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7681            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7682                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7683                // be VK_QUEUE_FAMILY_IGNORED
7684                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7685                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7686                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7687                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
7688                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
7689                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7690                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7691                }
7692            } else {
7693                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7694                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7695                // or both be a valid queue family
7696                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7697                    (src_q_f_index != dst_q_f_index)) {
7698                    skip_call |=
7699                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7700                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
7701                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7702                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7703                                                                     "must be.",
7704                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7705                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7706                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7707                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7708                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7709                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7710                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
7711                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7712                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
7713                                         "queueFamilies crated for this device.",
7714                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
7715                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
7716                }
7717            }
7718        }
7719
7720        if (mem_barrier) {
7721            skip_call |=
7722                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7723            skip_call |=
7724                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
            }
7731            auto image_data = getImageNode(dev_data, mem_barrier->image);
7732            VkFormat format = VK_FORMAT_UNDEFINED;
7733            uint32_t arrayLayers = 0, mipLevels = 0;
7734            bool imageFound = false;
7735            if (image_data) {
7736                format = image_data->createInfo.format;
7737                arrayLayers = image_data->createInfo.arrayLayers;
7738                mipLevels = image_data->createInfo.mipLevels;
7739                imageFound = true;
7740            } else if (dev_data->device_extensions.wsi_enabled) {
7741                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
7742                if (imageswap_data) {
7743                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
7744                    if (swapchain_data) {
7745                        format = swapchain_data->createInfo.imageFormat;
7746                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
7747                        mipLevels = 1;
7748                        imageFound = true;
7749                    }
7750                }
7751            }
7752            if (imageFound) {
7753                if (vk_format_is_depth_and_stencil(format) &&
7754                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
7755                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Image is a depth and stencil format and thus must "
                                         "have both VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                         funcName);
7761                }
7762                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
7763                                     ? 1
7764                                     : mem_barrier->subresourceRange.layerCount;
7765                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the baseArrayLayer (%d) and layerCount (%d) be "
                                         "less than or equal to the total number of layers (%d).",
                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
7772                }
7773                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
7774                                     ? 1
7775                                     : mem_barrier->subresourceRange.levelCount;
7776                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the baseMipLevel (%d) and levelCount (%d) be "
                                         "less than or equal to the total number of levels (%d).",
                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
                                         mem_barrier->subresourceRange.levelCount, mipLevels);
7783                }
7784            }
7785        }
7786    }
7787    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7788        auto mem_barrier = &pBufferMemBarriers[i];
7789        if (pCB->activeRenderPass) {
7790            skip_call |=
7791                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7792                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7793        }
7794        if (!mem_barrier)
7795            continue;
7796
7797        // Validate buffer barrier queue family indices
7798        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7799             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7800            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7801             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7802            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7803                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7804                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
7805                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7806                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7807                                 dev_data->phys_dev_properties.queue_family_properties.size());
7808        }
7809
7810        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
7811        if (buffer_node) {
7812            VkDeviceSize buffer_size =
7813                (buffer_node->createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO) ? buffer_node->createInfo.size : 0;
7814            if (mem_barrier->offset >= buffer_size) {
7815                skip_call |= log_msg(
7816                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7817                    DRAWSTATE_INVALID_BARRIER, "DS",
7818                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7819                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7820                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
7821            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7822                skip_call |= log_msg(
7823                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7824                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7825                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
7826                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7827                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
7828                    reinterpret_cast<const uint64_t &>(buffer_size));
7829            }
7830        }
7831    }
7832    return skip_call;
7833}
7834
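// Deferred (queue-time) check that vkCmdWaitEvents' srcStageMask is exactly the OR of the stageMasks the
// awaited events were set with (a HOST_BIT on top is tolerated for host-set events). E.g. if two events
// were set with VERTEX_SHADER_BIT and TRANSFER_BIT, srcStageMask must be their OR, optionally with
// VK_PIPELINE_STAGE_HOST_BIT added.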
7835bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
7836    bool skip_call = false;
7837    VkPipelineStageFlags stageMask = 0;
7838    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
7839    for (uint32_t i = 0; i < eventCount; ++i) {
7840        auto event = pCB->events[firstEventIndex + i];
7841        auto queue_data = dev_data->queueMap.find(queue);
7842        if (queue_data == dev_data->queueMap.end())
7843            return false;
7844        auto event_data = queue_data->second.eventToStageMap.find(event);
7845        if (event_data != queue_data->second.eventToStageMap.end()) {
7846            stageMask |= event_data->second;
7847        } else {
7848            auto global_event_data = dev_data->eventMap.find(event);
7849            if (global_event_data == dev_data->eventMap.end()) {
7850                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7851                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
7852                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
7853                                     reinterpret_cast<const uint64_t &>(event));
7854            } else {
7855                stageMask |= global_event_data->second.stageMask;
7856            }
7857        }
7858    }
7859    // TODO: Need to validate that host_bit is only set if set event is called
7860    // but set event can be called at any time.
7861    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
                                                            "using srcStageMask 0x%X, which must be the bitwise "
                                                            "OR of the stageMask parameters used in calls to "
                                                            "vkCmdSetEvent (plus VK_PIPELINE_STAGE_HOST_BIT if "
                                                            "the event was set with vkSetEvent), but the "
                                                            "accumulated mask is 0x%X.",
                             sourceStageMask, stageMask);
7869    }
7870    return skip_call;
7871}
7872
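// Validate vkCmdWaitEvents: track the waited events, defer the srcStageMask check to submit time via
// validateEventStageMask, then run the shared layout-transition and barrier validation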
7873VKAPI_ATTR void VKAPI_CALL
7874CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
7875              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7876              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7877              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7878    bool skipCall = false;
7879    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7880    std::unique_lock<std::mutex> lock(global_lock);
7881    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7882    if (pCB) {
7883        auto firstEventIndex = pCB->events.size();
7884        for (uint32_t i = 0; i < eventCount; ++i) {
7885            pCB->waitedEvents.insert(pEvents[i]);
7886            pCB->events.push_back(pEvents[i]);
7887        }
7888        std::function<bool(VkQueue)> eventUpdate =
7889            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
7890        pCB->eventUpdates.push_back(eventUpdate);
7891        if (pCB->state == CB_RECORDING) {
7892            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7893        } else {
7894            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
7895        }
7896        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7897        skipCall |=
7898            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7899                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7900    }
7901    lock.unlock();
7902    if (!skipCall)
7903        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7904                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7905                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7906}
7907
7908VKAPI_ATTR void VKAPI_CALL
7909CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
7910                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7911                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7912                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7913    bool skipCall = false;
7914    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7915    std::unique_lock<std::mutex> lock(global_lock);
7916    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7917    if (pCB) {
7918        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
7919        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7920        skipCall |=
7921            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7922                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7923    }
7924    lock.unlock();
7925    if (!skipCall)
7926        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7927                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7928                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7929}
7930
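// Deferred (queue-time) callback: mark a query as available (or reset) in both the command buffer's and
// the queue's queryToStateMap. Always returns false, i.e. never causes a skip.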
7931bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
7932    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7933    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7934    if (pCB) {
7935        pCB->queryToStateMap[object] = value;
7936    }
7937    auto queue_data = dev_data->queueMap.find(queue);
7938    if (queue_data != dev_data->queueMap.end()) {
7939        queue_data->second.queryToStateMap[object] = value;
7940    }
7941    return false;
7942}
7943
7944VKAPI_ATTR void VKAPI_CALL
7945CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
7946    bool skipCall = false;
7947    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7948    std::unique_lock<std::mutex> lock(global_lock);
7949    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7950    if (pCB) {
7951        QueryObject query = {queryPool, slot};
7952        pCB->activeQueries.insert(query);
7953        if (!pCB->startedQueries.count(query)) {
7954            pCB->startedQueries.insert(query);
7955        }
7956        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
7957    }
7958    lock.unlock();
7959    if (!skipCall)
7960        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
7961}
7962
7963VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
7964    bool skipCall = false;
7965    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7966    std::unique_lock<std::mutex> lock(global_lock);
7967    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7968    if (pCB) {
7969        QueryObject query = {queryPool, slot};
7970        if (!pCB->activeQueries.count(query)) {
7971            skipCall |=
7972                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7973                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
7974                        (uint64_t)(queryPool), slot);
7975        } else {
7976            pCB->activeQueries.erase(query);
7977        }
7978        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
7979        pCB->queryUpdates.push_back(queryUpdate);
7980        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
7982        } else {
7983            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
7984        }
7985    }
7986    lock.unlock();
7987    if (!skipCall)
7988        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
7989}
7990
7991VKAPI_ATTR void VKAPI_CALL
7992CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
7993    bool skipCall = false;
7994    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7995    std::unique_lock<std::mutex> lock(global_lock);
7996    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7997    if (pCB) {
7998        for (uint32_t i = 0; i < queryCount; i++) {
7999            QueryObject query = {queryPool, firstQuery + i};
8000            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8001            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
8002            pCB->queryUpdates.push_back(queryUpdate);
8003        }
8004        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8006        } else {
8007            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8008        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8010    }
8011    lock.unlock();
8012    if (!skipCall)
8013        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8014}
8015
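// Deferred (queue-time) check for vkCmdCopyQueryPoolResults: each copied query must have completed (been
// marked available via setQueryState) on this queue or globally, otherwise the copy reads invalid results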
8016bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
8017    bool skip_call = false;
8018    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8019    auto queue_data = dev_data->queueMap.find(queue);
8020    if (queue_data == dev_data->queueMap.end())
8021        return false;
8022    for (uint32_t i = 0; i < queryCount; i++) {
8023        QueryObject query = {queryPool, firstQuery + i};
8024        auto query_data = queue_data->second.queryToStateMap.find(query);
8025        bool fail = false;
8026        if (query_data != queue_data->second.queryToStateMap.end()) {
8027            if (!query_data->second) {
8028                fail = true;
8029            }
8030        } else {
8031            auto global_query_data = dev_data->queryToStateMap.find(query);
8032            if (global_query_data != dev_data->queryToStateMap.end()) {
8033                if (!global_query_data->second) {
8034                    fail = true;
8035                }
8036            } else {
8037                fail = true;
8038            }
8039        }
8040        if (fail) {
8041            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8042                                 DRAWSTATE_INVALID_QUERY, "DS",
8043                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8044                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8045        }
8046    }
8047    return skip_call;
8048}
8049
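// CmdCopyQueryPoolResults validation: queues a callback that marks the destination memory contents
// valid at submit time, checks that dstBuffer was created with VK_BUFFER_USAGE_TRANSFER_DST_BIT,
// and defers the query-availability check (validateQuery above) until the command buffer is
// actually submitted to a queue.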
8050VKAPI_ATTR void VKAPI_CALL
8051CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8052                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8053    bool skipCall = false;
8054    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8055    std::unique_lock<std::mutex> lock(global_lock);
8056    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8057#if MTMERGESOURCE
8058    VkDeviceMemory mem;
8059    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8060    skipCall |=
8061        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8062    if (cb_data != dev_data->commandBufferMap.end()) {
8063        std::function<bool()> function = [=]() {
8064            set_memory_valid(dev_data, mem, true);
8065            return false;
8066        };
8067        cb_data->second->validate_functions.push_back(function);
8068    }
8069    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8070    // Validate that DST buffer has correct usage flags set
8071    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8072                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8073#endif
8074    if (pCB) {
8075        std::function<bool(VkQueue)> queryUpdate =
8076            std::bind(validateQuery, std::placeholders::_1, pCB, queryPool, queryCount, firstQuery);
8077        pCB->queryUpdates.push_back(queryUpdate);
8078        if (pCB->state == CB_RECORDING) {
8079            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8080        } else {
8081            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8082        }
8083        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8084    }
8085    lock.unlock();
8086    if (!skipCall)
8087        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8088                                                                 dstOffset, stride, flags);
8089}
8090
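// vkCmdPushConstants validation: in addition to the recording-state check, the offset/size pair
// must pass validatePushConstantRange, stageFlags must be non-zero, and the incoming
// [offset, offset + size) range must be contained in the pipeline layout's push constant ranges
// whose stageFlags match exactly (checked below after coalescing adjacent/overlapping ranges).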
8091VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8092                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8093                                            const void *pValues) {
8094    bool skipCall = false;
8095    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8096    std::unique_lock<std::mutex> lock(global_lock);
8097    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8098    if (pCB) {
8099        if (pCB->state == CB_RECORDING) {
8100            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8101        } else {
8102            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8103        }
8104    }
8105    skipCall |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8106    if (0 == stageFlags) {
8107        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8108                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
8109    }
8110
8111    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
8112    auto pipeline_layout = getPipelineLayout(dev_data, layout);
8113    if (!pipeline_layout) {
8114        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8115                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
8116                            (uint64_t)layout);
8117    } else {
8118        // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
8119        // contained in the pipeline ranges.
8120        // Build a {start, end} span list for ranges with matching stage flags.
8121        const auto &ranges = pipeline_layout->pushConstantRanges;
8122        struct span {
8123            uint32_t start;
8124            uint32_t end;
8125        };
8126        std::vector<span> spans;
8127        spans.reserve(ranges.size());
8128        for (const auto &iter : ranges) {
8129            if (iter.stageFlags == stageFlags) {
8130                spans.push_back({iter.offset, iter.offset + iter.size});
8131            }
8132        }
8133        if (spans.empty()) {
8134            // There were no ranges that matched the stageFlags.
8135            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8136                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8137                                "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
8138                                "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
8139                                (uint32_t)stageFlags, (uint64_t)layout);
8140        } else {
8141            // Sort span list by start value.
8142            struct comparer {
8143                bool operator()(struct span i, struct span j) { return i.start < j.start; }
8144            } my_comparer;
8145            std::sort(spans.begin(), spans.end(), my_comparer);
8146
8147            // Examine two spans at a time.
8148            std::vector<span>::iterator current = spans.begin();
8149            std::vector<span>::iterator next = current + 1;
8150            while (next != spans.end()) {
8151                if (current->end < next->start) {
8152                    // There is a gap; cannot coalesce. Move to the next two spans.
8153                    ++current;
8154                    ++next;
8155                } else {
8156                    // Coalesce the two spans.  The start of the next span
8157                    // is within the current span, so pick the larger of
8158                    // the end values to extend the current span.
8159                    // Then delete the next span and set next to the span after it.
8160                    current->end = max(current->end, next->end);
8161                    next = spans.erase(next);
8162                }
8163            }
8164
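            // Worked example (hypothetical ranges): spans {0,16} and {8,24} merge into {0,24} while
            // {32,48} stays separate, so an update at offset 4 with size 16 ([4,20)) is accepted,
            // but an update at offset 20 with size 16 ([20,36)) is rejected below.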
8165            // Now we can check if the incoming range is within any of the spans.
8166            bool contained_in_a_range = false;
8167            for (uint32_t i = 0; i < spans.size(); ++i) {
8168                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
8169                    contained_in_a_range = true;
8170                    break;
8171                }
8172            }
8173            if (!contained_in_a_range) {
8174                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8175                                    __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8176                                    "vkCmdPushConstants() Push constant range [%d, %d) "
8177                                    "with stageFlags = 0x%" PRIx32 " "
8178                                    "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
8179                                    offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
8180            }
8181        }
8182    }
8183    lock.unlock();
8184    if (!skipCall)
8185        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8186}
8187
8188VKAPI_ATTR void VKAPI_CALL
8189CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8190    bool skipCall = false;
8191    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8192    std::unique_lock<std::mutex> lock(global_lock);
8193    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8194    if (pCB) {
8195        QueryObject query = {queryPool, slot};
8196        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8197        pCB->queryUpdates.push_back(queryUpdate);
8198        if (pCB->state == CB_RECORDING) {
8199            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8200        } else {
8201            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8202        }
8203    }
8204    lock.unlock();
8205    if (!skipCall)
8206        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8207}
8208
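// Check that every attachment referenced by a subpass was created from an image whose
// VkImageCreateInfo::usage contains the usage bit required for that role (input, color, or
// depth/stencil). VK_ATTACHMENT_UNUSED references and out-of-range indices are skipped here.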
8209static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8210                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
8211    bool skip_call = false;
8212
8213    for (uint32_t attach = 0; attach < count; attach++) {
8214        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8215            // Attachment counts are verified elsewhere, but prevent an invalid access
8216            if (attachments[attach].attachment < fbci->attachmentCount) {
8217                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8218                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, *image_view);
8219                if (ivci != nullptr) {
                    // Look up the image node first; taking &node->createInfo through a null pointer would be undefined behavior.
8220                    auto image_node = getImageNode(dev_data, ivci->image);
8221                    if (image_node != nullptr) {
8222                        if ((image_node->createInfo.usage & usage_flag) == 0) {
8223                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8224                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
8225                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
8226                                                 "IMAGE_USAGE flags (%s).",
8227                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
8228                        }
8229                    }
8230                }
8231            }
8232        }
8233    }
8234    return skip_call;
8235}
8236
8237// Validate VkFramebufferCreateInfo which includes:
8238// 1. attachmentCount equals renderPass attachmentCount
8239// 2. corresponding framebuffer and renderpass attachments have matching formats
8240// 3. corresponding framebuffer and renderpass attachments have matching sample counts
8241// 4. fb attachments only have a single mip level
8242// 5. fb attachment dimensions are each at least as large as the fb
8243// 6. fb attachments use identity swizzle
8244// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8245// 8. fb dimensions are within physical device limits
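// Example of a check 5 failure (hypothetical values): a 1024x1024 framebuffer whose color
// attachment view selects baseMipLevel 1 of a 1024x1024 image fails, because mip level 1 is only
// 512x512.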
8246static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8247    bool skip_call = false;
8248
8249    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
8250    if (rp_node) {
8251        const VkRenderPassCreateInfo *rpci = rp_node->pCreateInfo;
8252        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8253            skip_call |= log_msg(
8254                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8255                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
8256                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
8257                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
8258                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8259        } else {
8260            // attachmentCounts match, so make sure corresponding attachment details line up
8261            const VkImageView *image_views = pCreateInfo->pAttachments;
8262            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8263                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, image_views[i]);
                // Skip an attachment whose image view is unknown rather than dereferencing a null create info.
                if (ivci == nullptr) {
                    continue;
                }
8264                if (ivci->format != rpci->pAttachments[i].format) {
8265                    skip_call |= log_msg(
8266                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8267                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8268                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
8269                              "the format of "
8270                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8271                        i, string_VkFormat(ivci->format), string_VkFormat(rpci->pAttachments[i].format),
8272                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8273                }
                // Look up the image node before taking the address of its create info.
                auto image_node = getImageNode(dev_data, ivci->image);
                if (image_node == nullptr) {
                    continue;
                }
8274                const VkImageCreateInfo *ici = &image_node->createInfo;
8275                if (ici->samples != rpci->pAttachments[i].samples) {
8276                    skip_call |= log_msg(
8277                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8278                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8279                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
8280                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8281                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8282                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8283                }
8284                // Verify that view only has a single mip level
8285                if (ivci->subresourceRange.levelCount != 1) {
8286                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
8287                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8288                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
8289                                         "but only a single mip level (levelCount ==  1) is allowed when creating a Framebuffer.",
8290                                         i, ivci->subresourceRange.levelCount);
8291                }
8292                const uint32_t mip_level = ivci->subresourceRange.baseMipLevel;
8293                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8294                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
8295                if ((ivci->subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8296                    (mip_height < pCreateInfo->height)) {
8297                    skip_call |=
8298                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8299                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8300                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
8301                                "than the corresponding "
8302                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
8303                                "dimensions for "
8304                                "attachment #%u, framebuffer:\n"
8305                                "width: %u, %u\n"
8306                                "height: %u, %u\n"
8307                                "layerCount: %u, %u\n",
8308                                i, ivci->subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8309                                pCreateInfo->height, ivci->subresourceRange.layerCount, pCreateInfo->layers);
8310                }
8311                if (((ivci->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.r != VK_COMPONENT_SWIZZLE_R)) ||
8312                    ((ivci->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.g != VK_COMPONENT_SWIZZLE_G)) ||
8313                    ((ivci->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.b != VK_COMPONENT_SWIZZLE_B)) ||
8314                    ((ivci->components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.a != VK_COMPONENT_SWIZZLE_A))) {
8315                    skip_call |= log_msg(
8316                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8317                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8318                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All framebuffer "
8319                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
8320                        "r swizzle = %s\n"
8321                        "g swizzle = %s\n"
8322                        "b swizzle = %s\n"
8323                        "a swizzle = %s\n",
8324                        i, string_VkComponentSwizzle(ivci->components.r), string_VkComponentSwizzle(ivci->components.g),
8325                        string_VkComponentSwizzle(ivci->components.b), string_VkComponentSwizzle(ivci->components.a));
8326                }
8327            }
8328        }
8329        // Verify correct attachment usage flags
8330        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8331            // Verify input attachments:
8332            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
8333                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
8334            // Verify color attachments:
8335            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
8336                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
8337            // Verify depth/stencil attachments:
8338            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8339                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8340                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
8341            }
8342        }
8343    } else {
8344        skip_call |=
8345            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8346                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8347                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
8348                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8349    }
8350    // Verify FB dimensions are within physical device limits
8351    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
8352        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
8353        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
8354        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8355                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8356                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
8357                             "Here are the respective dimensions: requested, device max:\n"
8358                             "width: %u, %u\n"
8359                             "height: %u, %u\n"
8360                             "layerCount: %u, %u\n",
8361                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
8362                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
8363                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
8364    }
8365    return skip_call;
8366}
8367
8368// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
8369//  Return true if an error is encountered and callback returns true to skip call down chain
8370//   false indicates that call down chain should proceed
8371static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8372    // TODO : Verify that the renderPass this FB is created with is compatible with the FB
8373    bool skip_call = false;
8374    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8375    return skip_call;
8376}
8377
8378// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
8379static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
8380    // Shadow create info and store in map
8381    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
8382        new FRAMEBUFFER_NODE(pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->pCreateInfo));
8383
8384    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8385        VkImageView view = pCreateInfo->pAttachments[i];
8386        auto view_data = getImageViewData(dev_data, view);
8387        if (!view_data) {
8388            continue;
8389        }
8390        MT_FB_ATTACHMENT_INFO fb_info;
8391        get_mem_binding_from_object(dev_data, (uint64_t)(view_data->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8392                                    &fb_info.mem);
8393        fb_info.image = view_data->image;
8394        fb_node->attachments.push_back(fb_info);
8395    }
8396    dev_data->frameBufferMap[fb] = std::move(fb_node);
8397}
8398
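// The entry point follows the usual layered pattern: validate under the global lock, release the
// lock around the down-chain call, then re-acquire it to record state for the new framebuffer.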
8399VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8400                                                 const VkAllocationCallbacks *pAllocator,
8401                                                 VkFramebuffer *pFramebuffer) {
8402    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8403    std::unique_lock<std::mutex> lock(global_lock);
8404    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
8405    lock.unlock();
8406
8407    if (skip_call)
8408        return VK_ERROR_VALIDATION_FAILED_EXT;
8409
8410    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8411
8412    if (VK_SUCCESS == result) {
8413        lock.lock();
8414        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
8415        lock.unlock();
8416    }
8417    return result;
8418}
8419
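// Depth-first search backward through the subpass DAG: returns true if 'dependent' is reachable
// from 'index' along prev edges, i.e. a possibly transitive dependency path already exists.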
8420static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8421                           std::unordered_set<uint32_t> &processed_nodes) {
8422    // If we have already checked this node we have not found a dependency path so return false.
8423    if (processed_nodes.count(index))
8424        return false;
8425    processed_nodes.insert(index);
8426    const DAGNode &node = subpass_to_node[index];
8427    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8428    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8429        for (auto elem : node.prev) {
8430            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8431                return true;
8432        }
8433    } else {
8434        return true;
8435    }
8436    return false;
8437}
8438
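// For each subpass in dependent_subpasses that uses the same attachment, require a direct edge or
// a transitive dependency path (in either direction) between it and 'subpass'; log an error and
// return false when no such relationship exists.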
8439static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8440                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8441    bool result = true;
8442    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8443    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8444        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8445            continue;
8446        const DAGNode &node = subpass_to_node[subpass];
8447        // Check for a specified dependency between the two nodes. If one exists we are done.
8448        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8449        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8450        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8451            // If no explicit dependency exists, an implicit one still might. If not, report an error.
8452            std::unordered_set<uint32_t> processed_nodes;
8453            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8454                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8455                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8456                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8457                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8458                                     dependent_subpasses[k]);
8459                result = false;
8460            }
8461        }
8462    }
8463    return result;
8464}
8465
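// Walk prev edges recursively: returns true if 'attachment' is written by this subpass or any
// earlier one. At depth > 0, a subpass that sits between the writer and the reader without writing
// the attachment itself must list it in pPreserveAttachments, otherwise an error is logged.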
8466static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8467                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8468    const DAGNode &node = subpass_to_node[index];
8469    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8470    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8471    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8472        if (attachment == subpass.pColorAttachments[j].attachment)
8473            return true;
8474    }
8475    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8476        if (attachment == subpass.pDepthStencilAttachment->attachment)
8477            return true;
8478    }
8479    bool result = false;
8480    // Loop through previous nodes and see if any of them write to the attachment.
8481    for (auto elem : node.prev) {
8482        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8483    }
8484    // If the attachment was written to by a previous node then this node needs to preserve it.
8485    if (result && depth > 0) {
8487        bool has_preserved = false;
8488        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8489            if (subpass.pPreserveAttachments[j] == attachment) {
8490                has_preserved = true;
8491                break;
8492            }
8493        }
8494        if (!has_preserved) {
8495            skip_call |=
8496                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8497                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8498                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8499        }
8500    }
8501    return result;
8502}
8503
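// Overlap helpers for half-open [offset, offset + size) intervals: two ranges overlap iff each
// one starts before the other ends, which covers identical and fully-contained ranges as well as
// partial overlap. Example (hypothetical values): isRangeOverlapping(0u, 4u, 2u, 4u) and
// isRangeOverlapping(1u, 2u, 0u, 8u) are true; isRangeOverlapping(0u, 4u, 4u, 4u) is false
// because the ranges merely touch.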
8504template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8505    return ((offset1 + size1) > offset2) &&
8506           (offset1 < (offset2 + size2));
8507}
8508
8509bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8510    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8511            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8512}
8513
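// Cross-checks a framebuffer against its render pass in three phases: (1) find attachments that
// alias each other (same view, overlapping subresource ranges of one image, or overlapping memory
// bindings), (2) record which subpasses read and write each attachment including its aliases, and
// (3) confirm that a subpass dependency exists wherever those uses would otherwise conflict.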
8514static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
8515                                 RENDER_PASS_NODE const * renderPass) {
8516    bool skip_call = false;
8517    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
8518    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
8519    auto const & subpass_to_node = renderPass->subpassToNode;
8520    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8521    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8522    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8523    // Find overlapping attachments
8524    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8525        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8526            VkImageView viewi = pFramebufferInfo->pAttachments[i];
8527            VkImageView viewj = pFramebufferInfo->pAttachments[j];
8528            if (viewi == viewj) {
8529                overlapping_attachments[i].push_back(j);
8530                overlapping_attachments[j].push_back(i);
8531                continue;
8532            }
8533            auto view_data_i = getImageViewData(my_data, viewi);
8534            auto view_data_j = getImageViewData(my_data, viewj);
8535            if (!view_data_i || !view_data_j) {
8536                continue;
8537            }
8538            if (view_data_i->image == view_data_j->image &&
8539                isRegionOverlapping(view_data_i->subresourceRange, view_data_j->subresourceRange)) {
8540                overlapping_attachments[i].push_back(j);
8541                overlapping_attachments[j].push_back(i);
8542                continue;
8543            }
8544            auto image_data_i = getImageNode(my_data, view_data_i->image);
8545            auto image_data_j = getImageNode(my_data, view_data_j->image);
8546            if (!image_data_i || !image_data_j) {
8547                continue;
8548            }
8549            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
8550                                                                             image_data_j->memOffset, image_data_j->memSize)) {
8551                overlapping_attachments[i].push_back(j);
8552                overlapping_attachments[j].push_back(i);
8553            }
8554        }
8555    }
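    // Aliasing attachments must all carry VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT; report each
    // member of an aliasing pair that is missing the flag.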
8556    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8557        uint32_t attachment = i;
8558        for (auto other_attachment : overlapping_attachments[i]) {
8559            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8560                skip_call |=
8561                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8562                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8563                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8564                            attachment, other_attachment);
8565            }
8566            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8567                skip_call |=
8568                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8569                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8570                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8571                            other_attachment, attachment);
8572            }
8573        }
8574    }
8575    // For each attachment, find the subpasses that use it.
8576    unordered_set<uint32_t> attachmentIndices;
8577    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8578        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8579        attachmentIndices.clear();
8580        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8581            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8582            input_attachment_to_subpass[attachment].push_back(i);
8583            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8584                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8585            }
8586        }
8587        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8588            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8589            output_attachment_to_subpass[attachment].push_back(i);
8590            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8591                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8592            }
8593            attachmentIndices.insert(attachment);
8594        }
8595        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8596            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8597            output_attachment_to_subpass[attachment].push_back(i);
8598            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8599                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8600            }
8601
8602            if (attachmentIndices.count(attachment)) {
8603                skip_call |=
8604                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8605                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8606                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8607                            attachment, i);
8608            }
8609        }
8610    }
8611    // If there is a dependency needed make sure one exists
8612    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8613        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8614        // If the attachment is an input then all subpasses that output must have a dependency relationship
8615        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8616            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
8617            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8618        }
8619        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8620        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8621            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
8622            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8623            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8624        }
8625        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8626            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8627            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8628            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8629        }
8630    }
8631    // Check implicit preservation: if a subpass reads an attachment, every subpass between the
8632    // writer and the reader must list that attachment in its pPreserveAttachments.
8633    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8634        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8635        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8636            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
8637        }
8638    }
8639    return skip_call;
8640}
8641// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
8642// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that
8643// READ_ONLY layout attachments don't have CLEAR as their loadOp.
8644static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
8645                                                  const uint32_t attachment,
8646                                                  const VkAttachmentDescription &attachment_description) {
8647    bool skip_call = false;
8648    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
8649    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
8650        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
8651            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
8652            skip_call |=
8653                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
8654                        DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8655                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
8656        }
8657    }
8658    return skip_call;
8659}
8660
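// Check the layout of every attachment reference in each subpass: input attachments belong in a
// READ_ONLY_OPTIMAL layout, color attachments in COLOR_ATTACHMENT_OPTIMAL, and depth/stencil in
// DEPTH_STENCIL_ATTACHMENT_OPTIMAL. GENERAL is tolerated with a performance warning, anything else
// is an error, and each first-use layout is also checked against its VkAttachmentDescription above.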
8661static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
8662    bool skip = false;
8663
8664    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8665        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8666        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8667            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
8668                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
8669                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8670                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8671                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8672                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8673                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
8674                } else {
8675                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8676                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8677                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
8678                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
8679                }
8680            }
8681            auto attach_index = subpass.pInputAttachments[j].attachment;
8682            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index,
8683                                                          pCreateInfo->pAttachments[attach_index]);
8684        }
8685        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8686            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
8687                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8688                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8689                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8690                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8691                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
8692                } else {
8693                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8694                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8695                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
8696                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
8697                }
8698            }
8699            auto attach_index = subpass.pColorAttachments[j].attachment;
8700            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index,
8701                                                          pCreateInfo->pAttachments[attach_index]);
8702        }
8703        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8704            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
8705                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
8706                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8707                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8708                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8709                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8710                } else {
8711                    skip |=
8712                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8713                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8714                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8715                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8716                }
8717            }
8718            auto attach_index = subpass.pDepthStencilAttachment->attachment;
8719            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
8720                                                          attach_index, pCreateInfo->pAttachments[attach_index]);
8721        }
8722    }
8723    return skip;
8724}
8725
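// Build the subpass dependency graph: each VkSubpassDependency records its srcSubpass in the dst
// node's prev list and its dstSubpass in the src node's next list (nodes exist only for real
// subpasses, not VK_SUBPASS_EXTERNAL). Backward dependencies between real subpasses and
// external-to-external dependencies are reported as errors; self-dependencies are just recorded.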
8726static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8727                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8728    bool skip_call = false;
8729    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8730        DAGNode &subpass_node = subpass_to_node[i];
8731        subpass_node.pass = i;
8732    }
8733    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8734        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8735        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8736            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8737            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8738                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
8739                                 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
8740        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8741            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8742                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8743        } else if (dependency.srcSubpass == dependency.dstSubpass) {
8744            has_self_dependency[dependency.srcSubpass] = true;
8745        }
8746        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8747            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8748        }
8749        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8750            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8751        }
8752    }
8753    return skip_call;
8754}
8755
8756
8757VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8758                                                  const VkAllocationCallbacks *pAllocator,
8759                                                  VkShaderModule *pShaderModule) {
8760    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8761    bool skip_call = false;
8762
8763    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
8764    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
8765    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
8766    spv_diagnostic diag = nullptr;
8767
8768    auto result = spvValidate(ctx, &binary, &diag);
8769    if (result != SPV_SUCCESS) {
8770        skip_call |= log_msg(my_data->report_data,
8771                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
8772                             VkDebugReportObjectTypeEXT(0), 0,
8773                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
8774                             diag && diag->error ? diag->error : "(no error text)");
8775    }
8776
8777    spvDiagnosticDestroy(diag);
8778    spvContextDestroy(ctx);
8779
8780    if (skip_call)
8781        return VK_ERROR_VALIDATION_FAILED_EXT;
8782
8783    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8784
8785    if (res == VK_SUCCESS) {
8786        std::lock_guard<std::mutex> lock(global_lock);
8787        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
8788    }
8789    return res;
8790}
8791
8792static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
8793    bool skip_call = false;
8794    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
8795        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8796                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
8797                             "CreateRenderPass: %s attachment %d cannot be greater than the total number of attachments %d.",
8798                             type, attachment, attachment_count);
8799    }
8800    return skip_call;
8801}
8802
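// Per-subpass sanity checks on attachment references: the pipeline bind point must be GRAPHICS,
// preserve attachments may not be VK_ATTACHMENT_UNUSED, and every referenced index (input, color,
// resolve, depth/stencil, preserve) must stay below the render pass's attachmentCount.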
8803static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
8804    bool skip_call = false;
8805    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8806        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8807        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8808            skip_call |=
8809                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8810                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8811                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
8812        }
8813        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8814            uint32_t attachment = subpass.pPreserveAttachments[j];
8815            if (attachment == VK_ATTACHMENT_UNUSED) {
8816                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8817                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
8818                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
8819            } else {
8820                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
8821            }
8822        }
8823        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8824            uint32_t attachment;
8825            if (subpass.pResolveAttachments) {
8826                attachment = subpass.pResolveAttachments[j].attachment;
8827                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
8828            }
8829            attachment = subpass.pColorAttachments[j].attachment;
8830            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
8831        }
8832        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8833            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8834            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
8835        }
8836        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8837            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8838            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
8839        }
8840    }
8841    return skip_call;
8842}
8843
8844VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8845                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
8846    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
8847    bool skip_call = false;
8848    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8849
8850    std::unique_lock<std::mutex> lock(global_lock);
8851
8852    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
8853    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
8854    //       ValidateLayouts.
8855    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
8856
8857    if (skip_call) {
8858        return VK_ERROR_VALIDATION_FAILED_EXT;
8859    }
8860
8861    lock.unlock();
8863    result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8865    if (VK_SUCCESS == result) {
8866        lock.lock();
8867
8868        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8869        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8870        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
8871
8872        // TODOSC : Merge in tracking of renderpass from shader_checker
8873        // Shadow create info and store in map
8874        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
8875        if (pCreateInfo->pAttachments) {
8876            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
8877            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
8878                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
8879        }
8880        if (pCreateInfo->pSubpasses) {
8881            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
8882            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
8883
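            // Each subpass's attachment references (input, color, resolve, depth/stencil, preserve)
            // are packed into one contiguous allocation, and the subpass pointers are re-aimed at
            // slices of that block; deleteRenderPasses later frees the block through the first
            // non-null of those pointers.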
8884            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
8885                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
8886                const uint32_t attachmentCount = subpass->inputAttachmentCount +
8887                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
8888                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
8889                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
8890
8891                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
8892                subpass->pInputAttachments = attachments;
8893                attachments += subpass->inputAttachmentCount;
8894
8895                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8896                subpass->pColorAttachments = attachments;
8897                attachments += subpass->colorAttachmentCount;
8898
8899                if (subpass->pResolveAttachments) {
8900                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8901                    subpass->pResolveAttachments = attachments;
8902                    attachments += subpass->colorAttachmentCount;
8903                }
8904
8905                if (subpass->pDepthStencilAttachment) {
8906                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
8907                    subpass->pDepthStencilAttachment = attachments;
8908                    attachments += 1;
8909                }
8910
                // pPreserveAttachments is an array of uint32_t indices, not VkAttachmentReference,
                // so copy sizeof(uint32_t) per element to avoid reading past the end of the source.
8911                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
8912                subpass->pPreserveAttachments = &attachments->attachment;
8913            }
8914        }
8915        if (pCreateInfo->pDependencies) {
8916            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
8917            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
8918                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
8919        }
8920
8921        auto render_pass = new RENDER_PASS_NODE(localRPCI);
8922        render_pass->renderPass = *pRenderPass;
8923        render_pass->hasSelfDependency = has_self_dependency;
8924        render_pass->subpassToNode = subpass_to_node;
8925#if MTMERGESOURCE
8926        // MTMTODO : Merge with code from above to eliminate duplication
8927        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8928            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
8929            MT_PASS_ATTACHMENT_INFO pass_info;
8930            pass_info.load_op = desc.loadOp;
8931            pass_info.store_op = desc.storeOp;
8932            pass_info.stencil_load_op = desc.stencilLoadOp;
8933            pass_info.stencil_store_op = desc.stencilStoreOp;
8934            pass_info.attachment = i;
8935            render_pass->attachments.push_back(pass_info);
8936        }
8937        // TODO: Maybe fill list and then copy instead of locking
8938        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
8939        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
8940        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8941            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8942            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8943                uint32_t attachment = subpass.pColorAttachments[j].attachment;
8944                if (!attachment_first_read.count(attachment)) {
8945                    attachment_first_read.insert(std::make_pair(attachment, false));
8946                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
8947                }
8948            }
8949            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8950                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8951                if (!attachment_first_read.count(attachment)) {
8952                    attachment_first_read.insert(std::make_pair(attachment, false));
8953                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
8954                }
8955            }
8956            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8957                uint32_t attachment = subpass.pInputAttachments[j].attachment;
8958                if (!attachment_first_read.count(attachment)) {
8959                    attachment_first_read.insert(std::make_pair(attachment, true));
8960                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
8961                }
8962            }
8963        }
8964#endif
8965        dev_data->renderPassMap[*pRenderPass] = render_pass;
8966    }
8967    return result;
8968}
8969
8970// Free the renderpass shadow
8971static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
        return;
8974    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
8975        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
8976        delete[] pRenderPassInfo->pAttachments;
8977        if (pRenderPassInfo->pSubpasses) {
8978            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in one block, so we only need to
                //  find the first non-null pointer to delete the whole allocation
8981                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
8982                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
8983                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
8984                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
8985                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
8986                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
8987                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
8988                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
8989                }
8990            }
8991            delete[] pRenderPassInfo->pSubpasses;
8992        }
8993        delete[] pRenderPassInfo->pDependencies;
8994        delete pRenderPassInfo;
8995        delete (*ii).second;
8996    }
8997    my_data->renderPassMap.clear();
8998}
8999
9000static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9001    bool skip_call = false;
9002    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9003    const safe_VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
9004    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9005        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9006                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9007                                                                 "with a different number of attachments.");
9008    }
9009    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9010        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9011        auto image_data = getImageViewData(dev_data, image_view);
9012        assert(image_data);
9013        const VkImage &image = image_data->image;
9014        const VkImageSubresourceRange &subRange = image_data->subresourceRange;
9015        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9016                                             pRenderPassInfo->pAttachments[i].initialLayout};
9017        // TODO: Do not iterate over every possibility - consolidate where possible
9018        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9019            uint32_t level = subRange.baseMipLevel + j;
9020            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9021                uint32_t layer = subRange.baseArrayLayer + k;
9022                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9023                IMAGE_CMD_BUF_LAYOUT_NODE node;
9024                if (!FindLayout(pCB, image, sub, node)) {
9025                    SetLayout(pCB, image, sub, newNode);
9026                    continue;
9027                }
9028                if (newNode.layout != node.layout) {
9029                    skip_call |=
9030                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9031                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9032                                                                    "where the "
9033                                                                    "initial layout is %s and the layout of the attachment at the "
9034                                                                    "start of the render pass is %s. The layouts must match.",
9035                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9036                }
9037            }
9038        }
9039    }
9040    return skip_call;
9041}
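// Worked example for the subresource walk above (a sketch): an attachment view with
// baseMipLevel = 0, levelCount = 2, baseArrayLayer = 0, layerCount = 1 is checked as two
// subresources, (mip 0, layer 0) and (mip 1, layer 0); each either records the render
// pass's initialLayout on first sight or must already match the tracked layout.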
9042
9043static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
9044                                     const int subpass_index) {
9045    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9046    if (!renderPass)
9047        return;
9048
9049    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9050    if (!framebuffer)
9051        return;
9052
9053    const safe_VkFramebufferCreateInfo &framebufferInfo = framebuffer->createInfo;
9054    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
9055    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9056        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9057        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9058    }
9059    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9060        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9061        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9062    }
9063    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9064        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9065        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9066    }
9067}
9068
9069static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9070    bool skip_call = false;
9071    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9072        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9073                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9074                             cmd_name.c_str());
9075    }
9076    return skip_call;
9077}
9078
9079static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9080    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9081    if (!renderPass)
9082        return;
9083
9084    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
9085    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9086    if (!framebuffer)
9087        return;
9088
9089    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9090        const VkImageView &image_view = framebuffer->createInfo.pAttachments[i];
9091        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9092    }
9093}
9094
9095static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9096    bool skip_call = false;
9097    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &getFramebuffer(my_data, pRenderPassBegin->framebuffer)->createInfo;
9098    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9099        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9100        pRenderPassBegin->renderArea.offset.y < 0 ||
9101        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9102        skip_call |= static_cast<bool>(log_msg(
9103            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9104            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
9106            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9107            "height %d.",
9108            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9109            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9110    }
9111    return skip_call;
9112}
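// Example of the bounds check above: a 1024x768 framebuffer with renderArea offset
// (16, 16) and extent 1008x752 passes (16 + 1008 <= 1024 and 16 + 752 <= 768), while
// extent 1009x752 fails the width comparison and is reported as
// DRAWSTATE_INVALID_RENDER_AREA.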
9113
9114// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
9115// [load|store]Op flag must be checked
9116// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
9117template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
9118    if (color_depth_op != op && stencil_op != op) {
9119        return false;
9120    }
9121    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
9122    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
9123
9124    return (((check_color_depth_load_op == true) && (color_depth_op == op)) ||
9125            ((check_stencil_load_op == true) && (stencil_op == op)));
9126}
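// Example resolution of the template above (a sketch): for VK_FORMAT_D24_UNORM_S8_UINT
// with color_depth_op = LOAD and stencil_op = CLEAR, querying op = CLEAR returns true
// (the format has a stencil aspect and stencil_op matches), while op = DONT_CARE returns
// false because neither op matches and the early-out fires.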
9127
9128VKAPI_ATTR void VKAPI_CALL
9129CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9130    bool skipCall = false;
9131    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9132    std::unique_lock<std::mutex> lock(global_lock);
9133    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9134    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
9135    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9136    if (pCB) {
9137        if (renderPass) {
9138            uint32_t clear_op_count = 0;
9139            pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
9140            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
9141                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9142                VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format;
9143                if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9144                                                         renderPass->attachments[i].stencil_load_op,
9145                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9146                    ++clear_op_count;
9147                    std::function<bool()> function = [=]() {
9148                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9149                        return false;
9150                    };
9151                    pCB->validate_functions.push_back(function);
9152                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9153                                                                renderPass->attachments[i].stencil_load_op,
9154                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
9155                    std::function<bool()> function = [=]() {
9156                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9157                        return false;
9158                    };
9159                    pCB->validate_functions.push_back(function);
9160                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9161                                                                renderPass->attachments[i].stencil_load_op,
9162                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
9163                    std::function<bool()> function = [=]() {
9164                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9165                    };
9166                    pCB->validate_functions.push_back(function);
9167                }
9168                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
9169                    std::function<bool()> function = [=]() {
9170                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9171                    };
9172                    pCB->validate_functions.push_back(function);
9173                }
9174            }
9175            if (clear_op_count > pRenderPassBegin->clearValueCount) {
9176                skipCall |= log_msg(
9177                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9178                    reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9179                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but the actual number "
9180                    "of attachments in renderPass 0x%" PRIx64 " that use VK_ATTACHMENT_LOAD_OP_CLEAR is %u. The clearValueCount "
9181                    "must therefore be greater than or equal to %u.",
9182                    pRenderPassBegin->clearValueCount, reinterpret_cast<uint64_t &>(renderPass), clear_op_count, clear_op_count);
9183            }
9184            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9185            skipCall |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin);
9186            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9187            skipCall |= ValidateDependencies(dev_data, framebuffer, renderPass);
9188            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9189            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9190            pCB->activeRenderPass = renderPass;
9191            // This is a shallow copy as that is all that is needed for now
9192            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9193            pCB->activeSubpass = 0;
9194            pCB->activeSubpassContents = contents;
9195            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
9196            // Connect this framebuffer to this cmdBuffer
9197            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
9198        } else {
9199            skipCall |=
9200                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9201                            DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9202        }
9203    }
9204    lock.unlock();
9205    if (!skipCall) {
9206        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9207    }
9208}
9209
9210VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9211    bool skipCall = false;
9212    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9213    std::unique_lock<std::mutex> lock(global_lock);
9214    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9215    if (pCB) {
9216        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9217        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9218        pCB->activeSubpass++;
9219        pCB->activeSubpassContents = contents;
9220        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9221        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9222    }
9223    lock.unlock();
9224    if (!skipCall)
9225        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9226}
9227
9228VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9229    bool skipCall = false;
9230    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9231    std::unique_lock<std::mutex> lock(global_lock);
9232    auto pCB = getCBNode(dev_data, commandBuffer);
9233    if (pCB) {
9234        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
9235        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
9236        if (pRPNode) {
9237            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9238                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9239                VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format;
9240                if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9241                                                         pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) {
9242                    std::function<bool()> function = [=]() {
9243                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9244                        return false;
9245                    };
9246                    pCB->validate_functions.push_back(function);
9247                } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9248                                                                pRPNode->attachments[i].stencil_store_op,
9249                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9250                    std::function<bool()> function = [=]() {
9251                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9252                        return false;
9253                    };
9254                    pCB->validate_functions.push_back(function);
9255                }
9256            }
9257        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9259        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9260        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9261        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
9262        pCB->activeRenderPass = nullptr;
9263        pCB->activeSubpass = 0;
9264        pCB->activeFramebuffer = VK_NULL_HANDLE;
9265    }
9266    lock.unlock();
9267    if (!skipCall)
9268        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9269}
9270
9271static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9272                                        RENDER_PASS_NODE const *primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9273                                        const char *msg) {
9274    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9275                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9276                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
9277                   " that is not compatible with the current render pass 0x%" PRIx64 "."
                   " that is not compatible with the current render pass 0x%" PRIx64 ". "
9279                   (void *)secondaryBuffer, (uint64_t)(secondaryPass->renderPass), (uint64_t)(primaryPass->renderPass), primaryAttach, secondaryAttach,
9280                   msg);
9281}
9282
9283static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9284                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9285                                            uint32_t secondaryAttach, bool is_multi) {
9286    bool skip_call = false;
9287    if (primaryPass->pCreateInfo->attachmentCount <= primaryAttach) {
9288        primaryAttach = VK_ATTACHMENT_UNUSED;
9289    }
9290    if (secondaryPass->pCreateInfo->attachmentCount <= secondaryAttach) {
9291        secondaryAttach = VK_ATTACHMENT_UNUSED;
9292    }
9293    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9294        return skip_call;
9295    }
9296    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9297        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9298                                                 secondaryAttach, "The first is unused while the second is not.");
9299        return skip_call;
9300    }
9301    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9302        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9303                                                 secondaryAttach, "The second is unused while the first is not.");
9304        return skip_call;
9305    }
9306    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].format !=
9307        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].format) {
9308        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9309                                                 secondaryAttach, "They have different formats.");
9310    }
9311    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].samples !=
9312        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].samples) {
9313        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9314                                                 secondaryAttach, "They have different samples.");
9315    }
9316    if (is_multi &&
9317        primaryPass->pCreateInfo->pAttachments[primaryAttach].flags !=
9318            secondaryPass->pCreateInfo->pAttachments[secondaryAttach].flags) {
9319        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9320                                                 secondaryAttach, "They have different flags.");
9321    }
9322    return skip_call;
9323}
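// The checks above mirror the spec's render pass compatibility rules: out-of-range
// references are demoted to VK_ATTACHMENT_UNUSED, two UNUSED references are trivially
// compatible, and otherwise format and sample count must match, with flags compared only
// when the render passes contain multiple subpasses (is_multi).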
9324
9325static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9326                                         VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass, const int subpass,
9327                                         bool is_multi) {
9328    bool skip_call = false;
9329    const VkSubpassDescription &primary_desc = primaryPass->pCreateInfo->pSubpasses[subpass];
9330    const VkSubpassDescription &secondary_desc = secondaryPass->pCreateInfo->pSubpasses[subpass];
9331    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9332    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9333        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9334        if (i < primary_desc.inputAttachmentCount) {
9335            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9336        }
9337        if (i < secondary_desc.inputAttachmentCount) {
9338            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9339        }
9340        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9341                                                     secondaryPass, secondary_input_attach, is_multi);
9342    }
9343    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9344    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9345        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9346        if (i < primary_desc.colorAttachmentCount) {
9347            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9348        }
9349        if (i < secondary_desc.colorAttachmentCount) {
9350            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9351        }
9352        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9353                                                     secondaryPass, secondary_color_attach, is_multi);
9354        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9355        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9356            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9357        }
9358        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9359            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9360        }
9361        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9362                                                     secondaryPass, secondary_resolve_attach, is_multi);
9363    }
9364    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9365    if (primary_desc.pDepthStencilAttachment) {
9366        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9367    }
9368    if (secondary_desc.pDepthStencilAttachment) {
9369        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9370    }
9371    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9372                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9373    return skip_call;
9374}
9375
9376static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9377                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9378    bool skip_call = false;
9379    // Early exit if renderPass objects are identical (and therefore compatible)
9380    if (primaryPass == secondaryPass)
9381        return skip_call;
9382    auto primary_render_pass = getRenderPass(dev_data, primaryPass);
9383    auto secondary_render_pass = getRenderPass(dev_data, secondaryPass);
9384    if (!primary_render_pass) {
9385        skip_call |=
9386            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9387                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9388                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9389                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9390        return skip_call;
9391    }
9392    if (!secondary_render_pass) {
9393        skip_call |=
9394            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9395                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9396                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9397                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9398        return skip_call;
9399    }
9400    if (primary_render_pass->pCreateInfo->subpassCount != secondary_render_pass->pCreateInfo->subpassCount) {
9401        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9402                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9403                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
                             " that is not compatible with the current render pass 0x%" PRIx64 ". "
9405                             "They have a different number of subpasses.",
9406                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9407        return skip_call;
9408    }
9409    auto subpassCount = primary_render_pass->pCreateInfo->subpassCount;
9410    for (uint32_t i = 0; i < subpassCount; ++i) {
9411        skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primary_render_pass, secondaryBuffer,
9412                                                  secondary_render_pass, i, subpassCount > 1);
9413    }
9414    return skip_call;
9415}
9416
9417static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9418                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9419    bool skip_call = false;
9420    if (!pSubCB->beginInfo.pInheritanceInfo) {
9421        return skip_call;
9422    }
9423    VkFramebuffer primary_fb = pCB->activeFramebuffer;
9424    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9425    if (secondary_fb != VK_NULL_HANDLE) {
9426        if (primary_fb != secondary_fb) {
9427            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9428                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9429                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64
9430                                 " that is not compatible with the current framebuffer 0x%" PRIx64 ".",
9431                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9432        }
9433        auto fb = getFramebuffer(dev_data, secondary_fb);
9434        if (!fb) {
9435            skip_call |=
9436                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9437                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9438                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
9439                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9440            return skip_call;
9441        }
9442        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->createInfo.renderPass,
9443                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9444    }
9445    return skip_call;
9446}
9447
9448static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9449    bool skipCall = false;
9450    unordered_set<int> activeTypes;
9451    for (auto queryObject : pCB->activeQueries) {
9452        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9453        if (queryPoolData != dev_data->queryPoolMap.end()) {
9454            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9455                pSubCB->beginInfo.pInheritanceInfo) {
9456                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9457                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9458                    skipCall |= log_msg(
9459                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9460                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9461                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried so the command "
9463                        "buffer must have all bits set on the queryPool.",
9464                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9465                }
9466            }
9467            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9468        }
9469    }
9470    for (auto queryObject : pSubCB->startedQueries) {
9471        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9472        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9473            skipCall |=
9474                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9475                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9476                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
9478                        "secondary Cmd Buffer 0x%p.",
9479                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9480                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9481        }
9482    }
9483    return skipCall;
9484}
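// Note on the mask test above: (cmdBufStatistics & pool.pipelineStatistics) !=
// cmdBufStatistics is true exactly when the secondary command buffer inherits a pipeline
// statistic that the active query pool was not created with, which is what gets flagged.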
9485
9486VKAPI_ATTR void VKAPI_CALL
9487CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9488    bool skipCall = false;
9489    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9490    std::unique_lock<std::mutex> lock(global_lock);
9491    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9492    if (pCB) {
9493        GLOBAL_CB_NODE *pSubCB = NULL;
9494        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9495            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9496            if (!pSubCB) {
9497                skipCall |=
9498                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9499                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9500                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
9501                            (void *)pCommandBuffers[i], i);
9502            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9503                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9504                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9505                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
9506                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9507                                    (void *)pCommandBuffers[i], i);
9508            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9509                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9510                    skipCall |= log_msg(
9511                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9512                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9513                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
9514                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9515                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
9516                } else {
9517                    // Make sure render pass is compatible with parent command buffer pass if has continue
9518                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->renderPass, pCommandBuffers[i],
9519                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9520                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9521                }
9522                string errorString = "";
9523                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->renderPass,
9524                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9525                    skipCall |= log_msg(
9526                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9527                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9528                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
9529                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
9530                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9531                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
9532                }
9533                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
9534                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
9535                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9536                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9537                        skipCall |= log_msg(
9538                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9539                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9540                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64
9541                            ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").",
9542                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9543                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass);
9544                    }
9545                }
9546            }
9547            // TODO(mlentine): Move more logic into this method
9548            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9549            skipCall |= validateCommandBufferState(dev_data, pSubCB);
9550            // Secondary cmdBuffers are considered pending execution starting w/
9551            // being recorded
9552            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9553                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9554                    skipCall |= log_msg(
9555                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9556                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9557                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9558                        "set!",
9559                        (uint64_t)(pCB->commandBuffer));
9560                }
9561                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9562                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9563                    skipCall |= log_msg(
9564                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9565                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9566                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
9567                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9568                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9569                                          "set, even though it does.",
9570                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9571                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9572                }
9573            }
9574            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9575                skipCall |=
9576                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9577                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9578                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9579                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
9580                            "flight and inherited queries not "
9581                            "supported on this device.",
9582                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
9583            }
9584            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9585            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9586            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9587        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9590    }
9591    lock.unlock();
9592    if (!skipCall)
9593        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9594}
9595
9596static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9597    bool skip_call = false;
9598    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9599    auto mem_info = getMemObjInfo(dev_data, mem);
9600    if ((mem_info) && (mem_info->image != VK_NULL_HANDLE)) {
9601        std::vector<VkImageLayout> layouts;
9602        if (FindLayouts(dev_data, mem_info->image, layouts)) {
9603            for (auto layout : layouts) {
9604                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9605                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9606                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9607                                                                                         "GENERAL or PREINITIALIZED are supported.",
9608                                         string_VkImageLayout(layout));
9609                }
9610            }
9611        }
9612    }
9613    return skip_call;
9614}
9615
9616VKAPI_ATTR VkResult VKAPI_CALL
9617MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9618    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9619
9620    bool skip_call = false;
9621    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9622    std::unique_lock<std::mutex> lock(global_lock);
9623#if MTMERGESOURCE
9624    DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
9625    if (pMemObj) {
9626        pMemObj->valid = true;
9627        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9628             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9629            skip_call =
9630                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9631                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9632                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
9633        }
9634    }
9635    skip_call |= validateMemRange(dev_data, mem, offset, size);
9636#endif
9637    skip_call |= ValidateMapImageLayouts(device, mem);
9638    lock.unlock();
9639
9640    if (!skip_call) {
9641        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9642        if (VK_SUCCESS == result) {
9643#if MTMERGESOURCE
9644            lock.lock();
9645            storeMemRanges(dev_data, mem, offset, size);
9646            initializeAndTrackMemory(dev_data, mem, size, ppData);
9647            lock.unlock();
9648#endif
9649        }
9650    }
9651    return result;
9652}
9653
9654VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9655    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9656    bool skipCall = false;
9657
9658    std::unique_lock<std::mutex> lock(global_lock);
9659    skipCall |= deleteMemRanges(my_data, mem);
9660    lock.unlock();
9661    if (!skipCall) {
9662        my_data->device_dispatch_table->UnmapMemory(device, mem);
9663    }
9664}
9665
9666static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
9667                                   const VkMappedMemoryRange *pMemRanges) {
9668    bool skipCall = false;
9669    for (uint32_t i = 0; i < memRangeCount; ++i) {
9670        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9671        if (mem_info) {
9672            if (mem_info->memRange.offset > pMemRanges[i].offset) {
9673                skipCall |=
9674                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9675                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9676                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
9677                            "(" PRINTF_SIZE_T_SPECIFIER ").",
9678                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->memRange.offset));
9679            }
9680
9681            const uint64_t my_dataTerminus =
9682                    (mem_info->memRange.size == VK_WHOLE_SIZE) ? mem_info->allocInfo.allocationSize :
9683                                                                           (mem_info->memRange.offset + mem_info->memRange.size);
9684            if (pMemRanges[i].size != VK_WHOLE_SIZE && (my_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
9685                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9686                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9687                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
9688                                                                 ") exceeds the Memory Object's upper-bound "
9689                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
9690                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9691                                    static_cast<size_t>(my_dataTerminus));
9692            }
9693        }
9694    }
9695    return skipCall;
9696}
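// Example of the upper-bound logic above: a map with offset 0 and memRange.size ==
// VK_WHOLE_SIZE on a 65536-byte allocation yields my_dataTerminus = 65536, so a flush of
// offset 4096 with size 61440 passes (4096 + 61440 == 65536) while size 61441 is
// reported as exceeding the mapped range.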
9697
9698static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
9699                                                     const VkMappedMemoryRange *pMemRanges) {
9700    bool skipCall = false;
9701    for (uint32_t i = 0; i < memRangeCount; ++i) {
9702        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9703        if (mem_info) {
9704            if (mem_info->pData) {
9705                VkDeviceSize size = mem_info->memRange.size;
9706                VkDeviceSize half_size = (size / 2);
9707                char *data = static_cast<char *>(mem_info->pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
9709                    if (data[j] != NoncoherentMemoryFillValue) {
9710                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9711                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9712                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9713                                            (uint64_t)pMemRanges[i].memory);
9714                    }
9715                }
9716                for (auto j = size + half_size; j < 2 * size; ++j) {
9717                    if (data[j] != NoncoherentMemoryFillValue) {
9718                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9719                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9720                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9721                                            (uint64_t)pMemRanges[i].memory);
9722                    }
9723                }
9724                memcpy(mem_info->pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
9725            }
9726        }
9727    }
9728    return skipCall;
9729}
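// Guard-band layout assumed by the loops above (a sketch; see initializeAndTrackMemory):
//   [guard: size/2 bytes][app data: size bytes][guard: size/2 bytes]
// The 2*size shadow allocation is pre-filled with NoncoherentMemoryFillValue and the app
// is handed a pointer offset by size/2, so a modified guard byte indicates a write
// outside the mapped range; only the middle region is copied through to the driver.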
9730
9731VkResult VKAPI_CALL
9732FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9733    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9734    bool skipCall = false;
9735    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9736
9737    std::unique_lock<std::mutex> lock(global_lock);
9738    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
9739    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
9740    lock.unlock();
9741    if (!skipCall) {
9742        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9743    }
9744    return result;
9745}
9746
9747VkResult VKAPI_CALL
9748InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9749    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9750    bool skipCall = false;
9751    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9752
9753    std::unique_lock<std::mutex> lock(global_lock);
9754    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
9755    lock.unlock();
9756    if (!skipCall) {
9757        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9758    }
9759    return result;
9760}
9761
9762VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9763    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9764    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9765    bool skipCall = false;
9766    std::unique_lock<std::mutex> lock(global_lock);
9767    auto image_node = getImageNode(dev_data, image);
9768    if (image_node) {
9769        // Track objects tied to memory
9770        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
9771        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
9772        VkMemoryRequirements memRequirements;
9773        lock.unlock();
9774        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
9775        lock.lock();
9776
9777        // Track and validate bound memory range information
9778        auto mem_info = getMemObjInfo(dev_data, mem);
9779        if (mem_info) {
9780            const MEMORY_RANGE range =
9781                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, mem_info->imageRanges);
9782            skipCall |= validate_memory_range(dev_data, mem_info->bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
9783            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
9784        }
9785
9786        print_mem_list(dev_data);
9787        lock.unlock();
9788        if (!skipCall) {
9789            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
9790            lock.lock();
9791            dev_data->memObjMap[mem].get()->image = image;
9792            image_node->mem = mem;
9793            image_node->memOffset = memoryOffset;
9794            image_node->memSize = memRequirements.size;
9795            lock.unlock();
9796        }
9797    } else {
9798        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9799                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
9800                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been destroyed?",
9801                reinterpret_cast<const uint64_t &>(image));
9802    }
9803    return result;
9804}
9805
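// Validate and dispatch vkSetEvent. Setting an event from the host while an
// in-flight command buffer still writes it is a forward-progress hazard, which
// is reported as an error before the call proceeds.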
9806VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9807    bool skip_call = false;
9808    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9809    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9810    std::unique_lock<std::mutex> lock(global_lock);
9811    auto event_node = dev_data->eventMap.find(event);
9812    if (event_node != dev_data->eventMap.end()) {
9813        event_node->second.needsSignaled = false;
9814        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9815        if (event_node->second.write_in_use) {
9816            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9817                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9818                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
9819                                 reinterpret_cast<const uint64_t &>(event));
9820        }
9821    }
9822    lock.unlock();
9823    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9824    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9825    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9826    for (auto queue_data : dev_data->queueMap) {
9827        auto event_entry = queue_data.second.eventToStageMap.find(event);
9828        if (event_entry != queue_data.second.eventToStageMap.end()) {
9829            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9830        }
9831    }
9832    if (!skip_call)
9833        result = dev_data->device_dispatch_table->SetEvent(device, event);
9834    return result;
9835}
9836
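// Validate and dispatch vkQueueBindSparse: verifies the fence is submittable,
// records sparse memory bindings for buffer, opaque-image, and image binds, and
// applies the same wait/signal semaphore bookkeeping that vkQueueSubmit uses.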
9837VKAPI_ATTR VkResult VKAPI_CALL
9838QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
9839    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9840    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9841    bool skip_call = false;
9842    std::unique_lock<std::mutex> lock(global_lock);
9843    auto pFence = getFenceNode(dev_data, fence);
9844    auto pQueue = getQueueNode(dev_data, queue);
9845
9846    // First verify that fence is not in use
9847    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
9848
9849    if (pFence) { // also guards against an unknown fence handle, since SubmitFence dereferences the node
9850        SubmitFence(pQueue, pFence);
9851    }
9852
9853    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9854        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9855        // Track objects tied to memory
9856        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9857            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9858                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
9859                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9860                                           "vkQueueBindSparse"))
9861                    skip_call = true;
9862            }
9863        }
9864        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9865            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9866                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
9867                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9868                                           "vkQueueBindSparse"))
9869                    skip_call = true;
9870            }
9871        }
9872        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
9873            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
9874                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
9875                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9876                                           "vkQueueBindSparse"))
9877                    skip_call = true;
9878            }
9879        }
9880        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
9881            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
9882            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9883                if (dev_data->semaphoreMap[semaphore].signaled) {
9884                    dev_data->semaphoreMap[semaphore].signaled = false;
9885                } else {
9886                    skip_call |=
9887                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9888                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9889                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
9890                                " that has no way to be signaled.",
9891                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9892                }
9893            }
9894        }
9895        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
9896            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
9897            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9898                if (dev_data->semaphoreMap[semaphore].signaled) {
9899                    skip_call |=
9900                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9901                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9902                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
9903                                ", but that semaphore is already signaled.",
9904                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9905                }
9906                dev_data->semaphoreMap[semaphore].signaled = true;
9907            }
9908        }
9909    }
9910    print_mem_list(dev_data);
9911    lock.unlock();
9912
9913    if (!skip_call)
9914        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
9915
9916    return result;
9917}
9918
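// On successful creation, seed the layer's semaphore tracking state:
// unsignaled, not owned by any queue, and with a zero in-use count.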
9919VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
9920                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
9921    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9922    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
9923    if (result == VK_SUCCESS) {
9924        std::lock_guard<std::mutex> lock(global_lock);
9925        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
9926        sNode->signaled = false;
9927        sNode->queue = VK_NULL_HANDLE;
9928        sNode->in_use.store(0);
9929    }
9930    return result;
9931}
9932
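// On successful creation, seed the layer's event tracking state: unsignaled,
// no pending writes, and an empty stage mask.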
9933VKAPI_ATTR VkResult VKAPI_CALL
9934CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
9935    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9936    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
9937    if (result == VK_SUCCESS) {
9938        std::lock_guard<std::mutex> lock(global_lock);
9939        dev_data->eventMap[*pEvent].needsSignaled = false;
9940        dev_data->eventMap[*pEvent].in_use.store(0);
9941        dev_data->eventMap[*pEvent].write_in_use = 0;
9942        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
9943    }
9944    return result;
9945}
9946
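// On success, record a SWAPCHAIN_NODE so that later calls (image queries,
// presents) can be validated against the swapchain's create parameters.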
9947VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
9948                                                  const VkAllocationCallbacks *pAllocator,
9949                                                  VkSwapchainKHR *pSwapchain) {
9950    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9951    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
9952
9953    if (VK_SUCCESS == result) {
9954        std::lock_guard<std::mutex> lock(global_lock);
9955        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
9956    }
9957
9958    return result;
9959}
9960
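// Tear down layer state for a swapchain: drop per-image layout and subresource
// tracking, clear each swapchain image's memory binding, and erase the image
// nodes before removing the swapchain node itself.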
9961VKAPI_ATTR void VKAPI_CALL
9962DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
9963    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9964    bool skipCall = false;
9965
9966    std::unique_lock<std::mutex> lock(global_lock);
9967    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
9968    if (swapchain_data) {
9969        if (swapchain_data->images.size() > 0) {
9970            for (auto swapchain_image : swapchain_data->images) {
9971                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
9972                if (image_sub != dev_data->imageSubresourceMap.end()) {
9973                    for (auto imgsubpair : image_sub->second) {
9974                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
9975                        if (image_item != dev_data->imageLayoutMap.end()) {
9976                            dev_data->imageLayoutMap.erase(image_item);
9977                        }
9978                    }
9979                    dev_data->imageSubresourceMap.erase(image_sub);
9980                }
9981                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
9982                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
9983                dev_data->imageMap.erase(swapchain_image);
9984            }
9985        }
9986        dev_data->device_extensions.swapchainMap.erase(swapchain);
9987    }
9988    lock.unlock();
9989    if (!skipCall)
9990        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
9991}
9992
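// Swapchain images bypass vkCreateImage, so on first query the layer fabricates
// IMAGE_NODE entries for them here, keyed to the sentinel memory handle
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY, and seeds their layout as UNDEFINED.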
9993VKAPI_ATTR VkResult VKAPI_CALL
9994GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
9995    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9996    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
9997
9998    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
9999        // This should never happen and is checked by param checker.
10000        if (!pCount)
10001            return result;
10002        std::lock_guard<std::mutex> lock(global_lock);
10003        const size_t count = *pCount;
10004        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
10005        if (swapchain_node && !swapchain_node->images.empty()) {
10006            // TODO : Not sure I like the memcmp here, but it works
10007            const bool mismatch = (swapchain_node->images.size() != count ||
10008                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10009            if (mismatch) {
10010                // TODO: Verify against Valid Usage section of extension
10011                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10012                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10013                        "vkGetSwapchainImagesKHR(0x%" PRIx64
10014                        ") returned mismatching image data on successive calls",
10015                        (uint64_t)(swapchain));
10016            }
10017        }
10018        for (uint32_t i = 0; swapchain_node && i < *pCount; ++i) {
10019            IMAGE_LAYOUT_NODE image_layout_node;
10020            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10021            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10022            // Add imageMap entries for each swapchain image
10023            VkImageCreateInfo image_ci = {};
            image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
            image_ci.imageType = VK_IMAGE_TYPE_2D;
10024            image_ci.mipLevels = 1;
10025            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10026            image_ci.usage = swapchain_node->createInfo.imageUsage;
10027            image_ci.format = swapchain_node->createInfo.imageFormat;
10028            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
10029            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
10030            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
            image_ci.extent.depth = 1;
10031            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
10032            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(&image_ci));
10033            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
10034            image_node->valid = false;
10035            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10036            swapchain_node->images.push_back(pSwapchainImages[i]);
10037            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10038            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10039            dev_data->imageLayoutMap[subpair] = image_layout_node;
10040            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10041        }
10042    }
10043    return result;
10044}
10045
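// Validate and dispatch vkQueuePresentKHR: every wait semaphore must have a
// pending signal, and each presented image must already be in
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. Illustrative frame-loop usage (a sketch, not
// part of this layer; 'renderDoneSem', 'swapchain', and 'imageIndex' are
// hypothetical app-side variables):
//     VkPresentInfoKHR present = {};
//     present.sType              = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
//     present.waitSemaphoreCount = 1;
//     present.pWaitSemaphores    = &renderDoneSem;
//     present.swapchainCount     = 1;
//     present.pSwapchains        = &swapchain;
//     present.pImageIndices      = &imageIndex;   // from vkAcquireNextImageKHR
//     vkQueuePresentKHR(queue, &present);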
10046VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10047    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10048    bool skip_call = false;
10049
10050    std::lock_guard<std::mutex> lock(global_lock);
10051    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10052        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10053        if (pSemaphore && !pSemaphore->signaled) {
10054            skip_call |=
10055                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10056                            VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10057                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10058                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
10059        }
10060    }
10061    VkDeviceMemory mem;
10062    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10063        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10064        if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
10065            VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
10066#if MTMERGESOURCE
10067            skip_call |=
10068                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10069            skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10070#endif
10071            vector<VkImageLayout> layouts;
10072            if (FindLayouts(dev_data, image, layouts)) {
10073                for (auto layout : layouts) {
10074                    if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10075                        skip_call |=
10076                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10077                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10078                                        "Images passed to present must be in layout "
10079                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s",
10080                                        string_VkImageLayout(layout));
10081                    }
10082                }
10083            }
10084        }
10085    }
10086
10087    if (skip_call) {
10088        return VK_ERROR_VALIDATION_FAILED_EXT;
10089    }
10090
10091    VkResult result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10092
10093    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
10094        // Semaphore waits occur before error generation, if the call reached
10095        // the ICD. (Confirm?)
10096        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10097            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10098            if (pSemaphore && pSemaphore->signaled) {
10099                pSemaphore->signaled = false;
10100            }
10101        }
10102    }
10103
10104    return result;
10105}
10106
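// Validate and dispatch vkAcquireNextImageKHR. The semaphore must not already
// be signaled and the fence must be submittable; on VK_SUCCESS or
// VK_SUBOPTIMAL_KHR the fence is marked in-flight and the semaphore signaled,
// since a successful acquire counts as a semaphore signal operation.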
10107VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10108                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10109    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10110    bool skipCall = false;
10111
10112    std::unique_lock<std::mutex> lock(global_lock);
10113    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10114    if (pSemaphore && pSemaphore->signaled) {
10115        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10116                           reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10117                           "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10118    }
10119
10120    auto pFence = getFenceNode(dev_data, fence);
10121    if (pFence) {
10122        skipCall |= ValidateFenceForSubmit(dev_data, pFence);
10123    }
10124    lock.unlock();
10125
10126    if (skipCall)
10127        return VK_ERROR_VALIDATION_FAILED_EXT;
10128
10129    VkResult result =
10130            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10131
10132    lock.lock();
10133    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
10134        if (pFence) {
10135            pFence->state = FENCE_INFLIGHT;
10136        }
10137
10138        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
10139        if (pSemaphore) {
10140            pSemaphore->signaled = true;
10141        }
10142    }
10143    lock.unlock();
10144
10145    return result;
10146}
10147
10148VKAPI_ATTR VkResult VKAPI_CALL
10149CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10150                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10151    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10152    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10153    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10154    if (VK_SUCCESS == res) {
10155        std::lock_guard<std::mutex> lock(global_lock);
10156        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
10157    }
10158    return res;
10159}
10160
10161VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
10162                                                         VkDebugReportCallbackEXT msgCallback,
10163                                                         const VkAllocationCallbacks *pAllocator) {
10164    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10165    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10166    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10167    std::lock_guard<std::mutex> lock(global_lock);
10168    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10169}
10170
10171VKAPI_ATTR void VKAPI_CALL
10172DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10173                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10174    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10175    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10176                                                            pMsg);
10177}
10178
10179VKAPI_ATTR VkResult VKAPI_CALL
10180EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10181    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10182}
10183
10184VKAPI_ATTR VkResult VKAPI_CALL
10185EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
10186    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10187}
10188
10189VKAPI_ATTR VkResult VKAPI_CALL
10190EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
10191    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10192        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
10193
10194    return VK_ERROR_LAYER_NOT_PRESENT;
10195}
10196
10197VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10198                                                                  const char *pLayerName, uint32_t *pCount,
10199                                                                  VkExtensionProperties *pProperties) {
10200    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10201        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
10202
10203    assert(physicalDevice);
10204
10205    dispatch_key key = get_dispatch_key(physicalDevice);
10206    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
10207    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
10208}
10209
10210static PFN_vkVoidFunction
10211intercept_core_instance_command(const char *name);
10212
10213static PFN_vkVoidFunction
10214intercept_core_device_command(const char *name);
10215
10216static PFN_vkVoidFunction
10217intercept_khr_swapchain_command(const char *name, VkDevice dev);
10218
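// Device-level dispatch: prefer this layer's core intercepts, then its WSI
// (swapchain) intercepts, and only then pass the query down the chain.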
10219VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
10220    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
10221    if (proc)
10222        return proc;
10223
10224    assert(dev);
10225
10226    proc = intercept_khr_swapchain_command(funcName, dev);
10227    if (proc)
10228        return proc;
10229
10230    layer_data *dev_data;
10231    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10232
10233    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
10235    if (pTable->GetDeviceProcAddr == NULL)
10236        return NULL;
10237    return pTable->GetDeviceProcAddr(dev, funcName);
10239}
10240
10241VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
10242    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
10243    if (!proc)
10244        proc = intercept_core_device_command(funcName);
10245    if (!proc)
10246        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
10247    if (proc)
10248        return proc;
10249
10250    assert(instance);
10251
10252    layer_data *my_data;
10253    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10254    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
10255    if (proc)
10256        return proc;
10257
10258    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10259    if (pTable->GetInstanceProcAddr == NULL)
10260        return NULL;
10261    return pTable->GetInstanceProcAddr(instance, funcName);
10262}
10263
10264static PFN_vkVoidFunction
10265intercept_core_instance_command(const char *name) {
10266    static const struct {
10267        const char *name;
10268        PFN_vkVoidFunction proc;
10269    } core_instance_commands[] = {
10270        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
10271        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
10272        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
10273        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
10274        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
10275        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
10276        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
10277        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
10278        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
10279    };
10280
10281    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
10282        if (!strcmp(core_instance_commands[i].name, name))
10283            return core_instance_commands[i].proc;
10284    }
10285
10286    return nullptr;
10287}
10288
10289static PFN_vkVoidFunction
10290intercept_core_device_command(const char *name) {
10291    static const struct {
10292        const char *name;
10293        PFN_vkVoidFunction proc;
10294    } core_device_commands[] = {
10295        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
10296        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
10297        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
10298        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
10299        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
10300        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
10301        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
10302        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
10303        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
10304        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
10305        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
10306        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
10307        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
10308        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
10309        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
10310        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
10311        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
10312        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
10313        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
10314        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
10315        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
10316        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
10317        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
10318        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
10319        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
10320        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
10321        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
10322        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
10323        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
10324        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
10325        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
10326        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
10327        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
10328        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
10329        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
10330        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
10331        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
10332        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
10333        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
10334        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
10335        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
10336        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
10337        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
10338        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
10339        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
10340        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
10341        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
10342        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
10343        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
10344        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
10345        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
10346        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
10347        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
10348        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
10349        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
10350        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
10351        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
10352        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
10353        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
10354        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
10355        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
10356        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
10357        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
10358        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
10359        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
10360        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
10361        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
10362        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
10363        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
10364        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
10365        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
10366        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
10367        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
10368        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
10369        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
10370        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
10371        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
10372        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
10373        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
10374        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
10375        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
10376        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
10377        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
10378        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
10379        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
10380        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
10381        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
10382        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
10383        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
10384        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
10385        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
10386        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
10387        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
10388        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
10389        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
10390        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
10391        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
10392        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
10393        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
10394        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
10395        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
10396        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
10397        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
10398        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
10399        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
10400        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
10401        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
10402        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
10403        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
10404        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
10405        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
10406        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
10407        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
10408        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
10409        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
10410        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
10411    };
10412
10413    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
10414        if (!strcmp(core_device_commands[i].name, name))
10415            return core_device_commands[i].proc;
10416    }
10417
10418    return nullptr;
10419}
10420
10421static PFN_vkVoidFunction
10422intercept_khr_swapchain_command(const char *name, VkDevice dev) {
10423    static const struct {
10424        const char *name;
10425        PFN_vkVoidFunction proc;
10426    } khr_swapchain_commands[] = {
10427        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
10428        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
10429        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
10430        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
10431        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
10432    };
10433
10434    if (dev) {
10435        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10436        if (!dev_data->device_extensions.wsi_enabled)
10437            return nullptr;
10438    }
10439
10440    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
10441        if (!strcmp(khr_swapchain_commands[i].name, name))
10442            return khr_swapchain_commands[i].proc;
10443    }
10444
10445    return nullptr;
10446}
10447
10448} // namespace core_validation
10449
10450// vk_layer_logging.h expects these to be defined
10451
10452VKAPI_ATTR VkResult VKAPI_CALL
10453vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10454                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10455    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10456}
10457
10458VKAPI_ATTR void VKAPI_CALL
10459vkDestroyDebugReportCallbackEXT(VkInstance instance,
10460                                VkDebugReportCallbackEXT msgCallback,
10461                                const VkAllocationCallbacks *pAllocator) {
10462    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10463}
10464
10465VKAPI_ATTR void VKAPI_CALL
10466vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10467                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10468    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
10469}
10470
10471// loader-layer interface v0: these exports are thin wrappers, since this binary contains only a single layer
10472
10473VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10474vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
10475    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
10476}
10477
10478VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10479vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10480    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
10481}
10482
10483VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10484vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
10485    // the layer command handles VK_NULL_HANDLE just fine internally
10486    assert(physicalDevice == VK_NULL_HANDLE);
10487    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
10488}
10489
10490VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10491                                                                                    const char *pLayerName, uint32_t *pCount,
10492                                                                                    VkExtensionProperties *pProperties) {
10493    // the layer command handles VK_NULL_HANDLE just fine internally
10494    assert(physicalDevice == VK_NULL_HANDLE);
10495    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
10496}
10497
10498VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10499    return core_validation::GetDeviceProcAddr(dev, funcName);
10500}
10501
10502VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10503    return core_validation::GetInstanceProcAddr(instance, funcName);
10504}
10505