1/* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
4 * Copyright (C) 2015-2016 Google Inc.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *     http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * Author: Cody Northrop <cnorthrop@google.com>
19 * Author: Michael Lentine <mlentine@google.com>
20 * Author: Tobin Ehlis <tobine@google.com>
21 * Author: Chia-I Wu <olv@google.com>
22 * Author: Chris Forbes <chrisf@ijw.co.nz>
23 * Author: Mark Lobodzinski <mark@lunarg.com>
24 * Author: Ian Elliott <ianelliott@google.com>
25 */
26
27// Allow use of STL min and max functions in Windows
28#define NOMINMAX
29
30// Turn on mem_tracker merged code
31#define MTMERGESOURCE 1
32
33#include <SPIRV/spirv.hpp>
34#include <algorithm>
35#include <assert.h>
36#include <iostream>
37#include <list>
38#include <map>
39#include <mutex>
40#include <set>
41#include <stdio.h>
42#include <stdlib.h>
43#include <string.h>
44#include <string>
45#include <tuple>
46
47#include "vk_loader_platform.h"
48#include "vk_dispatch_table_helper.h"
49#include "vk_struct_string_helper_cpp.h"
50#if defined(__GNUC__)
51#pragma GCC diagnostic ignored "-Wwrite-strings"
52#endif
53#if defined(__GNUC__)
54#pragma GCC diagnostic warning "-Wwrite-strings"
55#endif
56#include "vk_struct_size_helper.h"
57#include "core_validation.h"
58#include "vk_layer_table.h"
59#include "vk_layer_data.h"
60#include "vk_layer_extension_utils.h"
61#include "vk_layer_utils.h"
62#include "spirv-tools/libspirv.h"
63
64#if defined __ANDROID__
65#include <android/log.h>
66#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
67#else
68#define LOGCONSOLE(...)                                                                                                            \
69    {                                                                                                                              \
70        printf(__VA_ARGS__);                                                                                                       \
71        printf("\n");                                                                                                              \
72    }
73#endif
74
75using namespace std;
76
77// TODO : CB really needs it's own class and files so this is just temp code until that happens
78GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
79    for (uint32_t i=0; i<VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
80        // Make sure that no sets hold onto deleted CB binding
81        for (auto set : lastBound[i].uniqueBoundSets) {
82            set->RemoveBoundCommandBuffer(this);
83        }
84    }
85}
86
87namespace core_validation {
88
89using std::unordered_map;
90using std::unordered_set;
91
92// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
93// Object value will be used to identify them internally.
94static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
95
96// Track command pools and their command buffers
97struct CMD_POOL_INFO {
98    VkCommandPoolCreateFlags createFlags;
99    uint32_t queueFamilyIndex;
100    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
101};
102
103struct devExts {
104    bool wsi_enabled;
105    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
106    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
107};
108
109// fwd decls
110struct shader_module;
111
112// TODO : Split this into separate structs for instance and device level data?
113struct layer_data {
114    VkInstance instance;
115
116    debug_report_data *report_data;
117    std::vector<VkDebugReportCallbackEXT> logging_callback;
118    VkLayerDispatchTable *device_dispatch_table;
119    VkLayerInstanceDispatchTable *instance_dispatch_table;
120
121    devExts device_extensions;
122    unordered_set<VkQueue> queues;  // all queues under given device
123    // Global set of all cmdBuffers that are inFlight on this device
124    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
125    // Layer specific data
126    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
127    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
128    unordered_map<VkImage, IMAGE_NODE> imageMap;
129    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
130    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
131    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
132    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
133    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
134    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
135    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
136    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
137    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
138    unordered_map<VkFence, FENCE_NODE> fenceMap;
139    unordered_map<VkQueue, QUEUE_NODE> queueMap;
140    unordered_map<VkEvent, EVENT_NODE> eventMap;
141    unordered_map<QueryObject, bool> queryToStateMap;
142    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
143    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
144    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
145    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
146    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
147    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
148    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
149    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
150    VkDevice device;
151
152    // Device specific data
153    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
154    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;
155
156    layer_data()
157        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
158          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {};
159};
160
161// TODO : Do we need to guard access to layer_data_map w/ lock?
162static unordered_map<void *, layer_data *> layer_data_map;
163
164static const VkLayerProperties global_layer = {
165    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
166};
167
168template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
169    bool foundLayer = false;
170    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
171        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
172            foundLayer = true;
173        }
174        // This has to be logged to console as we don't have a callback at this point.
175        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
176            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
177                       global_layer.layerName);
178        }
179    }
180}
181
182// Code imported from shader_checker
183static void build_def_index(shader_module *);
184
185// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
186// without the caller needing to care too much about the physical SPIRV module layout.
187struct spirv_inst_iter {
188    std::vector<uint32_t>::const_iterator zero;
189    std::vector<uint32_t>::const_iterator it;
190
191    uint32_t len() {
192        auto result = *it >> 16;
193        assert(result > 0);
194        return result;
195    }
196
197    uint32_t opcode() { return *it & 0x0ffffu; }
198
199    uint32_t const &word(unsigned n) {
200        assert(n < len());
201        return it[n];
202    }
203
204    uint32_t offset() { return (uint32_t)(it - zero); }
205
206    spirv_inst_iter() {}
207
208    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}
209
210    bool operator==(spirv_inst_iter const &other) { return it == other.it; }
211
212    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }
213
214    spirv_inst_iter operator++(int) { /* x++ */
215        spirv_inst_iter ii = *this;
216        it += len();
217        return ii;
218    }
219
220    spirv_inst_iter operator++() { /* ++x; */
221        it += len();
222        return *this;
223    }
224
225    /* The iterator and the value are the same thing. */
226    spirv_inst_iter &operator*() { return *this; }
227    spirv_inst_iter const &operator*() const { return *this; }
228};
229
230struct shader_module {
231    /* the spirv image itself */
232    vector<uint32_t> words;
233    /* a mapping of <id> to the first word of its def. this is useful because walking type
234     * trees, constant expressions, etc requires jumping all over the instruction stream.
235     */
236    unordered_map<unsigned, unsigned> def_index;
237
238    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
239        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
240          def_index() {
241
242        build_def_index(this);
243    }
244
245    /* expose begin() / end() to enable range-based for */
246    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
247    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
248    /* given an offset into the module, produce an iterator there. */
249    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }
250
251    /* gets an iterator to the definition of an id */
252    spirv_inst_iter get_def(unsigned id) const {
253        auto it = def_index.find(id);
254        if (it == def_index.end()) {
255            return end();
256        }
257        return at(it->second);
258    }
259};
260
261// TODO : This can be much smarter, using separate locks for separate global data
262static std::mutex global_lock;
263
264static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
265    switch (type) {
266    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
267        auto it = my_data->imageMap.find(VkImage(handle));
268        if (it != my_data->imageMap.end())
269            return &(*it).second.mem;
270        break;
271    }
272    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
273        auto it = my_data->bufferMap.find(VkBuffer(handle));
274        if (it != my_data->bufferMap.end())
275            return &(*it).second.mem;
276        break;
277    }
278    default:
279        break;
280    }
281    return nullptr;
282}
283
284// prototype
285static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);
286
287// Helper function to validate correct usage bits set for buffers or images
288//  Verify that (actual & desired) flags != 0 or,
289//   if strict is true, verify that (actual & desired) flags == desired
290//  In case of error, report it via dbg callbacks
291static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
292                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
293                                     char const *func_name, char const *usage_str) {
294    bool correct_usage = false;
295    bool skipCall = false;
296    if (strict)
297        correct_usage = ((actual & desired) == desired);
298    else
299        correct_usage = ((actual & desired) != 0);
300    if (!correct_usage) {
301        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
302                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
303                                                               " used by %s. In this case, %s should have %s set during creation.",
304                           ty_str, obj_handle, func_name, ty_str, usage_str);
305    }
306    return skipCall;
307}
308
309// Helper function to validate usage flags for images
310// Pulls image info and then sends actual vs. desired usage off to helper above where
311//  an error will be flagged if usage is not correct
312static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
313                                           char const *func_name, char const *usage_string) {
314    bool skipCall = false;
315    auto const image_node = dev_data->imageMap.find(image);
316    if (image_node != dev_data->imageMap.end()) {
317        skipCall = validate_usage_flags(dev_data, image_node->second.createInfo.usage, desired, strict, (uint64_t)image,
318                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
319    }
320    return skipCall;
321}
322
323// Helper function to validate usage flags for buffers
324// Pulls buffer info and then sends actual vs. desired usage off to helper above where
325//  an error will be flagged if usage is not correct
326static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
327                                            char const *func_name, char const *usage_string) {
328    bool skipCall = false;
329    auto const buffer_node = dev_data->bufferMap.find(buffer);
330    if (buffer_node != dev_data->bufferMap.end()) {
331        skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
332                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
333    }
334    return skipCall;
335}
336
337// Return ptr to info in map container containing mem, or NULL if not found
338//  Calls to this function should be wrapped in mutex
339static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
340    auto item = dev_data->memObjMap.find(mem);
341    if (item != dev_data->memObjMap.end()) {
342        return &(*item).second;
343    } else {
344        return NULL;
345    }
346}
347
348static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
349                             const VkMemoryAllocateInfo *pAllocateInfo) {
350    assert(object != NULL);
351
352    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
353    // TODO:  Update for real hardware, actually process allocation info structures
354    my_data->memObjMap[mem].allocInfo.pNext = NULL;
355    my_data->memObjMap[mem].object = object;
356    my_data->memObjMap[mem].mem = mem;
357    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
358    my_data->memObjMap[mem].memRange.offset = 0;
359    my_data->memObjMap[mem].memRange.size = 0;
360    my_data->memObjMap[mem].pData = 0;
361    my_data->memObjMap[mem].pDriverData = 0;
362    my_data->memObjMap[mem].valid = false;
363}
364
365static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
366                                     VkImage image = VK_NULL_HANDLE) {
367    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
368        auto const image_node = dev_data->imageMap.find(image);
369        if (image_node != dev_data->imageMap.end() && !image_node->second.valid) {
370            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
371                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
372                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
373                           functionName, (uint64_t)(image));
374        }
375    } else {
376        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
377        if (pMemObj && !pMemObj->valid) {
378            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
379                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
380                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
381                           (uint64_t)(mem));
382        }
383    }
384    return false;
385}
386
387static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
388    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
389        auto image_node = dev_data->imageMap.find(image);
390        if (image_node != dev_data->imageMap.end()) {
391            image_node->second.valid = valid;
392        }
393    } else {
394        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
395        if (pMemObj) {
396            pMemObj->valid = valid;
397        }
398    }
399}
400
401// Find CB Info and add mem reference to list container
402// Find Mem Obj Info and add CB reference to list container
403static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
404                                              const char *apiName) {
405    bool skipCall = false;
406
407    // Skip validation if this image was created through WSI
408    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
409
410        // First update CB binding in MemObj mini CB list
411        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
412        if (pMemInfo) {
413            pMemInfo->commandBufferBindings.insert(cb);
414            // Now update CBInfo's Mem reference list
415            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
416            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
417            if (pCBNode) {
418                pCBNode->memObjs.insert(mem);
419            }
420        }
421    }
422    return skipCall;
423}
424// For every mem obj bound to particular CB, free bindings related to that CB
425static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
426    if (pCBNode) {
427        if (pCBNode->memObjs.size() > 0) {
428            for (auto mem : pCBNode->memObjs) {
429                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
430                if (pInfo) {
431                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
432                }
433            }
434            pCBNode->memObjs.clear();
435        }
436        pCBNode->validate_functions.clear();
437    }
438}
439// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
440static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
441    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
442}
443
444// For given MemObjInfo, report Obj & CB bindings
445static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
446    bool skipCall = false;
447    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
448    size_t objRefCount = pMemObjInfo->objBindings.size();
449
450    if ((pMemObjInfo->commandBufferBindings.size()) != 0) {
451        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
452                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
453                           "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
454                           " references",
455                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
456    }
457
458    if (cmdBufRefCount > 0 && pMemObjInfo->commandBufferBindings.size() > 0) {
459        for (auto cb : pMemObjInfo->commandBufferBindings) {
460            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
461                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
462                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
463        }
464        // Clear the list of hanging references
465        pMemObjInfo->commandBufferBindings.clear();
466    }
467
468    if (objRefCount > 0 && pMemObjInfo->objBindings.size() > 0) {
469        for (auto obj : pMemObjInfo->objBindings) {
470            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
471                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
472                    obj.handle, (uint64_t)pMemObjInfo->mem);
473        }
474        // Clear the list of hanging references
475        pMemObjInfo->objBindings.clear();
476    }
477    return skipCall;
478}
479
480static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
481    bool skipCall = false;
482    auto item = my_data->memObjMap.find(mem);
483    if (item != my_data->memObjMap.end()) {
484        my_data->memObjMap.erase(item);
485    } else {
486        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
487                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
488                           "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
489    }
490    return skipCall;
491}
492
493static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
494    bool skipCall = false;
495    // Parse global list to find info w/ mem
496    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
497    if (pInfo) {
498        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
499            // TODO: Verify against Valid Use section
500            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
501                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
502                               "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
503                               "this should not be explicitly freed\n",
504                               (uint64_t)mem);
505        } else {
506            // Clear any CB bindings for completed CBs
507            //   TODO : Is there a better place to do this?
508
509            assert(pInfo->object != VK_NULL_HANDLE);
510            // clear_cmd_buf_and_mem_references removes elements from
511            // pInfo->commandBufferBindings -- this copy not needed in c++14,
512            // and probably not needed in practice in c++11
513            auto bindings = pInfo->commandBufferBindings;
514            for (auto cb : bindings) {
515                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
516                    clear_cmd_buf_and_mem_references(dev_data, cb);
517                }
518            }
519
520            // Now verify that no references to this mem obj remain and remove bindings
521            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
522                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
523            }
524            // Delete mem obj info
525            skipCall |= deleteMemObjInfo(dev_data, object, mem);
526        }
527    }
528    return skipCall;
529}
530
531static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
532    switch (type) {
533    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
534        return "image";
535    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
536        return "buffer";
537    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
538        return "swapchain";
539    default:
540        return "unknown";
541    }
542}
543
544// Remove object binding performs 3 tasks:
545// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
546// 2. Clear mem binding for image/buffer by setting its handle to 0
547// TODO : This only applied to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
548static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
549    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
550    bool skipCall = false;
551    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
552    if (pMemBinding) {
553        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, *pMemBinding);
554        // TODO : Make sure this is a reasonable way to reset mem binding
555        *pMemBinding = VK_NULL_HANDLE;
556        if (pMemObjInfo) {
557            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
558            // and set the objects memory binding pointer to NULL.
559            if (!pMemObjInfo->objBindings.erase({handle, type})) {
560                skipCall |=
561                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
562                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
563                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
564                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
565            }
566        }
567    }
568    return skipCall;
569}
570
571// For NULL mem case, output warning
572// Make sure given object is in global object map
573//  IF a previous binding existed, output validation error
574//  Otherwise, add reference from objectInfo to memoryInfo
575//  Add reference off of objInfo
576static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
577                                VkDebugReportObjectTypeEXT type, const char *apiName) {
578    bool skipCall = false;
579    // Handle NULL case separately, just clear previous binding & decrement reference
580    if (mem == VK_NULL_HANDLE) {
581        // TODO: Verify against Valid Use section of spec.
582        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
583                           "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
584    } else {
585        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
586        assert(pMemBinding);
587        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
588        if (pMemInfo) {
589            DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, *pMemBinding);
590            if (pPrevBinding != NULL) {
591                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
592                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
593                                    "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
594                                           ") which has already been bound to mem object 0x%" PRIxLEAST64,
595                                    apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
596            } else {
597                pMemInfo->objBindings.insert({handle, type});
598                // For image objects, make sure default memory state is correctly set
599                // TODO : What's the best/correct way to handle this?
600                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
601                    auto const image_node = dev_data->imageMap.find(VkImage(handle));
602                    if (image_node != dev_data->imageMap.end()) {
603                        VkImageCreateInfo ici = image_node->second.createInfo;
604                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
605                            // TODO::  More memory state transition stuff.
606                        }
607                    }
608                }
609                *pMemBinding = mem;
610            }
611        }
612    }
613    return skipCall;
614}
615
616// For NULL mem case, clear any previous binding Else...
617// Make sure given object is in its object map
618//  IF a previous binding existed, update binding
619//  Add reference from objectInfo to memoryInfo
620//  Add reference off of object's binding info
621// Return VK_TRUE if addition is successful, VK_FALSE otherwise
622static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
623                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
624    bool skipCall = VK_FALSE;
625    // Handle NULL case separately, just clear previous binding & decrement reference
626    if (mem == VK_NULL_HANDLE) {
627        skipCall = clear_object_binding(dev_data, handle, type);
628    } else {
629        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
630        assert(pMemBinding);
631        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
632        if (pInfo) {
633            pInfo->objBindings.insert({handle, type});
634            // Need to set mem binding for this object
635            *pMemBinding = mem;
636        }
637    }
638    return skipCall;
639}
640
641// For given Object, get 'mem' obj that it's bound to or NULL if no binding
642static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
643                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
644    bool skipCall = false;
645    *mem = VK_NULL_HANDLE;
646    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
647    if (pMemBinding) {
648        *mem = *pMemBinding;
649    } else {
650        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
651                           "MEM", "Trying to get mem binding for object 0x%" PRIxLEAST64 " but no such object in %s list", handle,
652                           object_type_to_string(type));
653    }
654    return skipCall;
655}
656
657// Print details of MemObjInfo list
658static void print_mem_list(layer_data *dev_data) {
659    DEVICE_MEM_INFO *pInfo = NULL;
660
661    // Early out if info is not requested
662    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
663        return;
664    }
665
666    // Just printing each msg individually for now, may want to package these into single large print
667    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
668            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
669            dev_data->memObjMap.size());
670    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
671            MEMTRACK_NONE, "MEM", "=============================");
672
673    if (dev_data->memObjMap.size() <= 0)
674        return;
675
676    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
677        pInfo = &(*ii).second;
678
679        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
680                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)pInfo);
681        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
682                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(pInfo->mem));
683        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
684                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
685                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
686        if (0 != pInfo->allocInfo.allocationSize) {
687            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
688            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
689                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
690        } else {
691            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
692                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
693        }
694
695        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
696                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
697                pInfo->objBindings.size());
698        if (pInfo->objBindings.size() > 0) {
699            for (auto obj : pInfo->objBindings) {
700                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
701                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
702            }
703        }
704
705        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
706                __LINE__, MEMTRACK_NONE, "MEM",
707                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
708                pInfo->commandBufferBindings.size());
709        if (pInfo->commandBufferBindings.size() > 0) {
710            for (auto cb : pInfo->commandBufferBindings) {
711                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
712                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
713            }
714        }
715    }
716}
717
718static void printCBList(layer_data *my_data) {
719    GLOBAL_CB_NODE *pCBInfo = NULL;
720
721    // Early out if info is not requested
722    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
723        return;
724    }
725
726    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
727            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
728            my_data->commandBufferMap.size());
729    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
730            MEMTRACK_NONE, "MEM", "==================");
731
732    if (my_data->commandBufferMap.size() <= 0)
733        return;
734
735    for (auto &cb_node : my_data->commandBufferMap) {
736        pCBInfo = cb_node.second;
737
738        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
739                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);
740
741        if (pCBInfo->memObjs.size() <= 0)
742            continue;
743        for (auto obj : pCBInfo->memObjs) {
744            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
745                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
746        }
747    }
748}
749
750// Return a string representation of CMD_TYPE enum
751static string cmdTypeToString(CMD_TYPE cmd) {
752    switch (cmd) {
753    case CMD_BINDPIPELINE:
754        return "CMD_BINDPIPELINE";
755    case CMD_BINDPIPELINEDELTA:
756        return "CMD_BINDPIPELINEDELTA";
757    case CMD_SETVIEWPORTSTATE:
758        return "CMD_SETVIEWPORTSTATE";
759    case CMD_SETLINEWIDTHSTATE:
760        return "CMD_SETLINEWIDTHSTATE";
761    case CMD_SETDEPTHBIASSTATE:
762        return "CMD_SETDEPTHBIASSTATE";
763    case CMD_SETBLENDSTATE:
764        return "CMD_SETBLENDSTATE";
765    case CMD_SETDEPTHBOUNDSSTATE:
766        return "CMD_SETDEPTHBOUNDSSTATE";
767    case CMD_SETSTENCILREADMASKSTATE:
768        return "CMD_SETSTENCILREADMASKSTATE";
769    case CMD_SETSTENCILWRITEMASKSTATE:
770        return "CMD_SETSTENCILWRITEMASKSTATE";
771    case CMD_SETSTENCILREFERENCESTATE:
772        return "CMD_SETSTENCILREFERENCESTATE";
773    case CMD_BINDDESCRIPTORSETS:
774        return "CMD_BINDDESCRIPTORSETS";
775    case CMD_BINDINDEXBUFFER:
776        return "CMD_BINDINDEXBUFFER";
777    case CMD_BINDVERTEXBUFFER:
778        return "CMD_BINDVERTEXBUFFER";
779    case CMD_DRAW:
780        return "CMD_DRAW";
781    case CMD_DRAWINDEXED:
782        return "CMD_DRAWINDEXED";
783    case CMD_DRAWINDIRECT:
784        return "CMD_DRAWINDIRECT";
785    case CMD_DRAWINDEXEDINDIRECT:
786        return "CMD_DRAWINDEXEDINDIRECT";
787    case CMD_DISPATCH:
788        return "CMD_DISPATCH";
789    case CMD_DISPATCHINDIRECT:
790        return "CMD_DISPATCHINDIRECT";
791    case CMD_COPYBUFFER:
792        return "CMD_COPYBUFFER";
793    case CMD_COPYIMAGE:
794        return "CMD_COPYIMAGE";
795    case CMD_BLITIMAGE:
796        return "CMD_BLITIMAGE";
797    case CMD_COPYBUFFERTOIMAGE:
798        return "CMD_COPYBUFFERTOIMAGE";
799    case CMD_COPYIMAGETOBUFFER:
800        return "CMD_COPYIMAGETOBUFFER";
801    case CMD_CLONEIMAGEDATA:
802        return "CMD_CLONEIMAGEDATA";
803    case CMD_UPDATEBUFFER:
804        return "CMD_UPDATEBUFFER";
805    case CMD_FILLBUFFER:
806        return "CMD_FILLBUFFER";
807    case CMD_CLEARCOLORIMAGE:
808        return "CMD_CLEARCOLORIMAGE";
809    case CMD_CLEARATTACHMENTS:
810        return "CMD_CLEARCOLORATTACHMENT";
811    case CMD_CLEARDEPTHSTENCILIMAGE:
812        return "CMD_CLEARDEPTHSTENCILIMAGE";
813    case CMD_RESOLVEIMAGE:
814        return "CMD_RESOLVEIMAGE";
815    case CMD_SETEVENT:
816        return "CMD_SETEVENT";
817    case CMD_RESETEVENT:
818        return "CMD_RESETEVENT";
819    case CMD_WAITEVENTS:
820        return "CMD_WAITEVENTS";
821    case CMD_PIPELINEBARRIER:
822        return "CMD_PIPELINEBARRIER";
823    case CMD_BEGINQUERY:
824        return "CMD_BEGINQUERY";
825    case CMD_ENDQUERY:
826        return "CMD_ENDQUERY";
827    case CMD_RESETQUERYPOOL:
828        return "CMD_RESETQUERYPOOL";
829    case CMD_COPYQUERYPOOLRESULTS:
830        return "CMD_COPYQUERYPOOLRESULTS";
831    case CMD_WRITETIMESTAMP:
832        return "CMD_WRITETIMESTAMP";
833    case CMD_INITATOMICCOUNTERS:
834        return "CMD_INITATOMICCOUNTERS";
835    case CMD_LOADATOMICCOUNTERS:
836        return "CMD_LOADATOMICCOUNTERS";
837    case CMD_SAVEATOMICCOUNTERS:
838        return "CMD_SAVEATOMICCOUNTERS";
839    case CMD_BEGINRENDERPASS:
840        return "CMD_BEGINRENDERPASS";
841    case CMD_ENDRENDERPASS:
842        return "CMD_ENDRENDERPASS";
843    default:
844        return "UNKNOWN";
845    }
846}
847
848// SPIRV utility functions
849static void build_def_index(shader_module *module) {
850    for (auto insn : *module) {
851        switch (insn.opcode()) {
852        /* Types */
853        case spv::OpTypeVoid:
854        case spv::OpTypeBool:
855        case spv::OpTypeInt:
856        case spv::OpTypeFloat:
857        case spv::OpTypeVector:
858        case spv::OpTypeMatrix:
859        case spv::OpTypeImage:
860        case spv::OpTypeSampler:
861        case spv::OpTypeSampledImage:
862        case spv::OpTypeArray:
863        case spv::OpTypeRuntimeArray:
864        case spv::OpTypeStruct:
865        case spv::OpTypeOpaque:
866        case spv::OpTypePointer:
867        case spv::OpTypeFunction:
868        case spv::OpTypeEvent:
869        case spv::OpTypeDeviceEvent:
870        case spv::OpTypeReserveId:
871        case spv::OpTypeQueue:
872        case spv::OpTypePipe:
873            module->def_index[insn.word(1)] = insn.offset();
874            break;
875
876        /* Fixed constants */
877        case spv::OpConstantTrue:
878        case spv::OpConstantFalse:
879        case spv::OpConstant:
880        case spv::OpConstantComposite:
881        case spv::OpConstantSampler:
882        case spv::OpConstantNull:
883            module->def_index[insn.word(2)] = insn.offset();
884            break;
885
886        /* Specialization constants */
887        case spv::OpSpecConstantTrue:
888        case spv::OpSpecConstantFalse:
889        case spv::OpSpecConstant:
890        case spv::OpSpecConstantComposite:
891        case spv::OpSpecConstantOp:
892            module->def_index[insn.word(2)] = insn.offset();
893            break;
894
895        /* Variables */
896        case spv::OpVariable:
897            module->def_index[insn.word(2)] = insn.offset();
898            break;
899
900        /* Functions */
901        case spv::OpFunction:
902            module->def_index[insn.word(2)] = insn.offset();
903            break;
904
905        default:
906            /* We don't care about any other defs for now. */
907            break;
908        }
909    }
910}
911
912static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
913    for (auto insn : *src) {
914        if (insn.opcode() == spv::OpEntryPoint) {
915            auto entrypointName = (char const *)&insn.word(3);
916            auto entrypointStageBits = 1u << insn.word(1);
917
918            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
919                return insn;
920            }
921        }
922    }
923
924    return src->end();
925}
926
927static char const *storage_class_name(unsigned sc) {
928    switch (sc) {
929    case spv::StorageClassInput:
930        return "input";
931    case spv::StorageClassOutput:
932        return "output";
933    case spv::StorageClassUniformConstant:
934        return "const uniform";
935    case spv::StorageClassUniform:
936        return "uniform";
937    case spv::StorageClassWorkgroup:
938        return "workgroup local";
939    case spv::StorageClassCrossWorkgroup:
940        return "workgroup global";
941    case spv::StorageClassPrivate:
942        return "private global";
943    case spv::StorageClassFunction:
944        return "function";
945    case spv::StorageClassGeneric:
946        return "generic";
947    case spv::StorageClassAtomicCounter:
948        return "atomic counter";
949    case spv::StorageClassImage:
950        return "image";
951    case spv::StorageClassPushConstant:
952        return "push constant";
953    default:
954        return "unknown";
955    }
956}
957
958/* get the value of an integral constant */
959unsigned get_constant_value(shader_module const *src, unsigned id) {
960    auto value = src->get_def(id);
961    assert(value != src->end());
962
963    if (value.opcode() != spv::OpConstant) {
964        /* TODO: Either ensure that the specialization transform is already performed on a module we're
965            considering here, OR -- specialize on the fly now.
966            */
967        return 1;
968    }
969
970    return value.word(3);
971}
972
973
974static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
975    auto insn = src->get_def(type);
976    assert(insn != src->end());
977
978    switch (insn.opcode()) {
979    case spv::OpTypeBool:
980        ss << "bool";
981        break;
982    case spv::OpTypeInt:
983        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
984        break;
985    case spv::OpTypeFloat:
986        ss << "float" << insn.word(2);
987        break;
988    case spv::OpTypeVector:
989        ss << "vec" << insn.word(3) << " of ";
990        describe_type_inner(ss, src, insn.word(2));
991        break;
992    case spv::OpTypeMatrix:
993        ss << "mat" << insn.word(3) << " of ";
994        describe_type_inner(ss, src, insn.word(2));
995        break;
996    case spv::OpTypeArray:
997        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
998        describe_type_inner(ss, src, insn.word(2));
999        break;
1000    case spv::OpTypePointer:
1001        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1002        describe_type_inner(ss, src, insn.word(3));
1003        break;
1004    case spv::OpTypeStruct: {
1005        ss << "struct of (";
1006        for (unsigned i = 2; i < insn.len(); i++) {
1007            describe_type_inner(ss, src, insn.word(i));
1008            if (i == insn.len() - 1) {
1009                ss << ")";
1010            } else {
1011                ss << ", ";
1012            }
1013        }
1014        break;
1015    }
1016    case spv::OpTypeSampler:
1017        ss << "sampler";
1018        break;
1019    case spv::OpTypeSampledImage:
1020        ss << "sampler+";
1021        describe_type_inner(ss, src, insn.word(2));
1022        break;
1023    case spv::OpTypeImage:
1024        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1025        break;
1026    default:
1027        ss << "oddtype";
1028        break;
1029    }
1030}
1031
1032
1033static std::string describe_type(shader_module const *src, unsigned type) {
1034    std::ostringstream ss;
1035    describe_type_inner(ss, src, type);
1036    return ss.str();
1037}
1038
1039
1040static bool is_narrow_numeric_type(spirv_inst_iter type)
1041{
1042    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
1043        return false;
1044    return type.word(2) < 64;
1045}
1046
1047
1048static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1049    /* walk two type trees together, and complain about differences */
1050    auto a_insn = a->get_def(a_type);
1051    auto b_insn = b->get_def(b_type);
1052    assert(a_insn != a->end());
1053    assert(b_insn != b->end());
1054
1055    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1056        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1057    }
1058
1059    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1060        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1061        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1062    }
1063
1064    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1065        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1066    }
1067
1068    if (a_insn.opcode() != b_insn.opcode()) {
1069        return false;
1070    }
1071
1072    if (a_insn.opcode() == spv::OpTypePointer) {
1073        /* match on pointee type. storage class is expected to differ */
1074        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1075    }
1076
1077    if (a_arrayed || b_arrayed) {
1078        /* if we havent resolved array-of-verts by here, we're not going to. */
1079        return false;
1080    }
1081
1082    switch (a_insn.opcode()) {
1083    case spv::OpTypeBool:
1084        return true;
1085    case spv::OpTypeInt:
1086        /* match on width, signedness */
1087        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1088    case spv::OpTypeFloat:
1089        /* match on width */
1090        return a_insn.word(2) == b_insn.word(2);
1091    case spv::OpTypeVector:
1092        /* match on element type, count. */
1093        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1094            return false;
1095        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1096            return a_insn.word(3) >= b_insn.word(3);
1097        }
1098        else {
1099            return a_insn.word(3) == b_insn.word(3);
1100        }
1101    case spv::OpTypeMatrix:
1102        /* match on element type, count. */
1103        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1104    case spv::OpTypeArray:
1105        /* match on element type, count. these all have the same layout. we don't get here if
1106         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1107         * not a literal within OpTypeArray */
1108        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1109               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1110    case spv::OpTypeStruct:
1111        /* match on all element types */
1112        {
1113            if (a_insn.len() != b_insn.len()) {
1114                return false; /* structs cannot match if member counts differ */
1115            }
1116
1117            for (unsigned i = 2; i < a_insn.len(); i++) {
1118                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1119                    return false;
1120                }
1121            }
1122
1123            return true;
1124        }
1125    default:
1126        /* remaining types are CLisms, or may not appear in the interfaces we
1127         * are interested in. Just claim no match.
1128         */
1129        return false;
1130    }
1131}
1132
1133static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1134    auto it = map.find(id);
1135    if (it == map.end())
1136        return def;
1137    else
1138        return it->second;
1139}
1140
1141static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1142    auto insn = src->get_def(type);
1143    assert(insn != src->end());
1144
1145    switch (insn.opcode()) {
1146    case spv::OpTypePointer:
1147        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1148         * we're never actually passing pointers around. */
1149        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1150    case spv::OpTypeArray:
1151        if (strip_array_level) {
1152            return get_locations_consumed_by_type(src, insn.word(2), false);
1153        } else {
1154            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1155        }
1156    case spv::OpTypeMatrix:
1157        /* num locations is the dimension * element size */
1158        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1159    case spv::OpTypeVector: {
1160        auto scalar_type = src->get_def(insn.word(2));
1161        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1162            scalar_type.word(2) : 32;
1163
1164        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
1165         * types require two. */
1166        return (bit_width * insn.word(3) + 127) / 128;
1167    }
1168    default:
1169        /* everything else is just 1. */
1170        return 1;
1171
1172        /* TODO: extend to handle 64bit scalar types, whose vectors may need
1173         * multiple locations. */
1174    }
1175}
1176
1177static unsigned get_locations_consumed_by_format(VkFormat format) {
1178    switch (format) {
1179    case VK_FORMAT_R64G64B64A64_SFLOAT:
1180    case VK_FORMAT_R64G64B64A64_SINT:
1181    case VK_FORMAT_R64G64B64A64_UINT:
1182    case VK_FORMAT_R64G64B64_SFLOAT:
1183    case VK_FORMAT_R64G64B64_SINT:
1184    case VK_FORMAT_R64G64B64_UINT:
1185        return 2;
1186    default:
1187        return 1;
1188    }
1189}
1190
1191typedef std::pair<unsigned, unsigned> location_t;
1192typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1193
1194struct interface_var {
1195    uint32_t id;
1196    uint32_t type_id;
1197    uint32_t offset;
1198    bool is_patch;
1199    bool is_block_member;
1200    /* TODO: collect the name, too? Isn't required to be present. */
1201};
1202
1203struct shader_stage_attributes {
1204    char const *const name;
1205    bool arrayed_input;
1206    bool arrayed_output;
1207};
1208
1209static shader_stage_attributes shader_stage_attribs[] = {
1210    {"vertex shader", false, false},
1211    {"tessellation control shader", true, true},
1212    {"tessellation evaluation shader", true, false},
1213    {"geometry shader", true, false},
1214    {"fragment shader", false, false},
1215};
1216
1217static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1218    while (true) {
1219
1220        if (def.opcode() == spv::OpTypePointer) {
1221            def = src->get_def(def.word(3));
1222        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1223            def = src->get_def(def.word(2));
1224            is_array_of_verts = false;
1225        } else if (def.opcode() == spv::OpTypeStruct) {
1226            return def;
1227        } else {
1228            return src->end();
1229        }
1230    }
1231}
1232
1233static void collect_interface_block_members(shader_module const *src,
1234                                            std::map<location_t, interface_var> &out,
1235                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1236                                            uint32_t id, uint32_t type_id, bool is_patch) {
1237    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1238    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1239    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1240        /* this isn't an interface block. */
1241        return;
1242    }
1243
1244    std::unordered_map<unsigned, unsigned> member_components;
1245
1246    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1247    for (auto insn : *src) {
1248        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1249            unsigned member_index = insn.word(2);
1250
1251            if (insn.word(3) == spv::DecorationComponent) {
1252                unsigned component = insn.word(4);
1253                member_components[member_index] = component;
1254            }
1255        }
1256    }
1257
1258    /* Second pass -- produce the output, from Location decorations */
1259    for (auto insn : *src) {
1260        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1261            unsigned member_index = insn.word(2);
1262            unsigned member_type_id = type.word(2 + member_index);
1263
1264            if (insn.word(3) == spv::DecorationLocation) {
1265                unsigned location = insn.word(4);
1266                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1267                auto component_it = member_components.find(member_index);
1268                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1269
1270                for (unsigned int offset = 0; offset < num_locations; offset++) {
1271                    interface_var v;
1272                    v.id = id;
1273                    /* TODO: member index in interface_var too? */
1274                    v.type_id = member_type_id;
1275                    v.offset = offset;
1276                    v.is_patch = is_patch;
1277                    v.is_block_member = true;
1278                    out[std::make_pair(location + offset, component)] = v;
1279                }
1280            }
1281        }
1282    }
1283}
1284
1285static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1286                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1287                                          bool is_array_of_verts) {
1288    std::unordered_map<unsigned, unsigned> var_locations;
1289    std::unordered_map<unsigned, unsigned> var_builtins;
1290    std::unordered_map<unsigned, unsigned> var_components;
1291    std::unordered_map<unsigned, unsigned> blocks;
1292    std::unordered_map<unsigned, unsigned> var_patch;
1293
1294    for (auto insn : *src) {
1295
1296        /* We consider two interface models: SSO rendezvous-by-location, and
1297         * builtins. Complain about anything that fits neither model.
1298         */
1299        if (insn.opcode() == spv::OpDecorate) {
1300            if (insn.word(2) == spv::DecorationLocation) {
1301                var_locations[insn.word(1)] = insn.word(3);
1302            }
1303
1304            if (insn.word(2) == spv::DecorationBuiltIn) {
1305                var_builtins[insn.word(1)] = insn.word(3);
1306            }
1307
1308            if (insn.word(2) == spv::DecorationComponent) {
1309                var_components[insn.word(1)] = insn.word(3);
1310            }
1311
1312            if (insn.word(2) == spv::DecorationBlock) {
1313                blocks[insn.word(1)] = 1;
1314            }
1315
1316            if (insn.word(2) == spv::DecorationPatch) {
1317                var_patch[insn.word(1)] = 1;
1318            }
1319        }
1320    }
1321
1322    /* TODO: handle grouped decorations */
1323    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1324     * have the same location, and we DON'T want to clobber. */
1325
1326    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1327       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1328       the word to determine which word contains the terminator. */
1329    uint32_t word = 3;
1330    while (entrypoint.word(word) & 0xff000000u) {
1331        ++word;
1332    }
1333    ++word;
1334
1335    for (; word < entrypoint.len(); word++) {
1336        auto insn = src->get_def(entrypoint.word(word));
1337        assert(insn != src->end());
1338        assert(insn.opcode() == spv::OpVariable);
1339
1340        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1341            unsigned id = insn.word(2);
1342            unsigned type = insn.word(1);
1343
1344            int location = value_or_default(var_locations, id, -1);
1345            int builtin = value_or_default(var_builtins, id, -1);
1346            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1347            bool is_patch = var_patch.find(id) != var_patch.end();
1348
1349            /* All variables and interface block members in the Input or Output storage classes
1350             * must be decorated with either a builtin or an explicit location.
1351             *
1352             * TODO: integrate the interface block support here. For now, don't complain --
1353             * a valid SPIRV module will only hit this path for the interface block case, as the
1354             * individual members of the type are decorated, rather than variable declarations.
1355             */
1356
1357            if (location != -1) {
1358                /* A user-defined interface variable, with a location. Where a variable
1359                 * occupied multiple locations, emit one result for each. */
1360                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1361                for (unsigned int offset = 0; offset < num_locations; offset++) {
1362                    interface_var v;
1363                    v.id = id;
1364                    v.type_id = type;
1365                    v.offset = offset;
1366                    v.is_patch = is_patch;
1367                    v.is_block_member = false;
1368                    out[std::make_pair(location + offset, component)] = v;
1369                }
1370            } else if (builtin == -1) {
1371                /* An interface block instance */
1372                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
1373            }
1374        }
1375    }
1376}
1377
1378static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
1379                                                 std::unordered_set<uint32_t> const &accessible_ids,
1380                                                 std::map<descriptor_slot_t, interface_var> &out) {
1381
1382    std::unordered_map<unsigned, unsigned> var_sets;
1383    std::unordered_map<unsigned, unsigned> var_bindings;
1384
1385    for (auto insn : *src) {
1386        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1387         * DecorationDescriptorSet and DecorationBinding.
1388         */
1389        if (insn.opcode() == spv::OpDecorate) {
1390            if (insn.word(2) == spv::DecorationDescriptorSet) {
1391                var_sets[insn.word(1)] = insn.word(3);
1392            }
1393
1394            if (insn.word(2) == spv::DecorationBinding) {
1395                var_bindings[insn.word(1)] = insn.word(3);
1396            }
1397        }
1398    }
1399
1400    for (auto id : accessible_ids) {
1401        auto insn = src->get_def(id);
1402        assert(insn != src->end());
1403
1404        if (insn.opcode() == spv::OpVariable &&
1405            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1406            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1407            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1408
1409            auto existing_it = out.find(std::make_pair(set, binding));
1410            if (existing_it != out.end()) {
1411                /* conflict within spv image */
1412                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1413                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1414                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1415                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1416                        existing_it->first.second);
1417            }
1418
1419            interface_var v;
1420            v.id = insn.word(2);
1421            v.type_id = insn.word(1);
1422            v.offset = 0;
1423            v.is_patch = false;
1424            v.is_block_member = false;
1425            out[std::make_pair(set, binding)] = v;
1426        }
1427    }
1428}
1429
1430static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1431                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1432                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1433                                              shader_stage_attributes const *consumer_stage) {
1434    std::map<location_t, interface_var> outputs;
1435    std::map<location_t, interface_var> inputs;
1436
1437    bool pass = true;
1438
1439    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1440    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1441
1442    auto a_it = outputs.begin();
1443    auto b_it = inputs.begin();
1444
1445    /* maps sorted by key (location); walk them together to find mismatches */
1446    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1447        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1448        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1449        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1450        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1451
1452        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1453            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1454                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1455                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1456                        a_first.second, consumer_stage->name)) {
1457                pass = false;
1458            }
1459            a_it++;
1460        } else if (a_at_end || a_first > b_first) {
1461            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1462                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1463                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1464                        producer_stage->name)) {
1465                pass = false;
1466            }
1467            b_it++;
1468        } else {
1469            // subtleties of arrayed interfaces:
1470            // - if is_patch, then the member is not arrayed, even though the interface may be.
1471            // - if is_block_member, then the extra array level of an arrayed interface is not
1472            //   expressed in the member type -- it's expressed in the block type.
1473            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1474                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1475                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1476                             true)) {
1477                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1478                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1479                            a_first.first, a_first.second,
1480                            describe_type(producer, a_it->second.type_id).c_str(),
1481                            describe_type(consumer, b_it->second.type_id).c_str())) {
1482                    pass = false;
1483                }
1484            }
1485            if (a_it->second.is_patch != b_it->second.is_patch) {
1486                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1487                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1488                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1489                            "per-%s in %s stage", a_first.first, a_first.second,
1490                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1491                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1492                    pass = false;
1493                }
1494            }
1495            a_it++;
1496            b_it++;
1497        }
1498    }
1499
1500    return pass;
1501}
1502
1503enum FORMAT_TYPE {
1504    FORMAT_TYPE_UNDEFINED,
1505    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1506    FORMAT_TYPE_SINT,
1507    FORMAT_TYPE_UINT,
1508};
1509
1510static unsigned get_format_type(VkFormat fmt) {
1511    switch (fmt) {
1512    case VK_FORMAT_UNDEFINED:
1513        return FORMAT_TYPE_UNDEFINED;
1514    case VK_FORMAT_R8_SINT:
1515    case VK_FORMAT_R8G8_SINT:
1516    case VK_FORMAT_R8G8B8_SINT:
1517    case VK_FORMAT_R8G8B8A8_SINT:
1518    case VK_FORMAT_R16_SINT:
1519    case VK_FORMAT_R16G16_SINT:
1520    case VK_FORMAT_R16G16B16_SINT:
1521    case VK_FORMAT_R16G16B16A16_SINT:
1522    case VK_FORMAT_R32_SINT:
1523    case VK_FORMAT_R32G32_SINT:
1524    case VK_FORMAT_R32G32B32_SINT:
1525    case VK_FORMAT_R32G32B32A32_SINT:
1526    case VK_FORMAT_R64_SINT:
1527    case VK_FORMAT_R64G64_SINT:
1528    case VK_FORMAT_R64G64B64_SINT:
1529    case VK_FORMAT_R64G64B64A64_SINT:
1530    case VK_FORMAT_B8G8R8_SINT:
1531    case VK_FORMAT_B8G8R8A8_SINT:
1532    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1533    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1534    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1535        return FORMAT_TYPE_SINT;
1536    case VK_FORMAT_R8_UINT:
1537    case VK_FORMAT_R8G8_UINT:
1538    case VK_FORMAT_R8G8B8_UINT:
1539    case VK_FORMAT_R8G8B8A8_UINT:
1540    case VK_FORMAT_R16_UINT:
1541    case VK_FORMAT_R16G16_UINT:
1542    case VK_FORMAT_R16G16B16_UINT:
1543    case VK_FORMAT_R16G16B16A16_UINT:
1544    case VK_FORMAT_R32_UINT:
1545    case VK_FORMAT_R32G32_UINT:
1546    case VK_FORMAT_R32G32B32_UINT:
1547    case VK_FORMAT_R32G32B32A32_UINT:
1548    case VK_FORMAT_R64_UINT:
1549    case VK_FORMAT_R64G64_UINT:
1550    case VK_FORMAT_R64G64B64_UINT:
1551    case VK_FORMAT_R64G64B64A64_UINT:
1552    case VK_FORMAT_B8G8R8_UINT:
1553    case VK_FORMAT_B8G8R8A8_UINT:
1554    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1555    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1556    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1557        return FORMAT_TYPE_UINT;
1558    default:
1559        return FORMAT_TYPE_FLOAT;
1560    }
1561}
1562
1563/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1564 * for comparison to a VkFormat's characterization above. */
1565static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1566    auto insn = src->get_def(type);
1567    assert(insn != src->end());
1568
1569    switch (insn.opcode()) {
1570    case spv::OpTypeInt:
1571        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1572    case spv::OpTypeFloat:
1573        return FORMAT_TYPE_FLOAT;
1574    case spv::OpTypeVector:
1575        return get_fundamental_type(src, insn.word(2));
1576    case spv::OpTypeMatrix:
1577        return get_fundamental_type(src, insn.word(2));
1578    case spv::OpTypeArray:
1579        return get_fundamental_type(src, insn.word(2));
1580    case spv::OpTypePointer:
1581        return get_fundamental_type(src, insn.word(3));
1582    default:
1583        return FORMAT_TYPE_UNDEFINED;
1584    }
1585}
1586
1587static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1588    uint32_t bit_pos = u_ffs(stage);
1589    return bit_pos - 1;
1590}
1591
1592static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1593    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1594     * each binding should be specified only once.
1595     */
1596    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1597    bool pass = true;
1598
1599    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1600        auto desc = &vi->pVertexBindingDescriptions[i];
1601        auto &binding = bindings[desc->binding];
1602        if (binding) {
1603            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1604                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1605                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1606                pass = false;
1607            }
1608        } else {
1609            binding = desc;
1610        }
1611    }
1612
1613    return pass;
1614}
1615
1616static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1617                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1618    std::map<location_t, interface_var> inputs;
1619    bool pass = true;
1620
1621    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);
1622
1623    /* Build index by location */
1624    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1625    if (vi) {
1626        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1627            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1628            for (auto j = 0u; j < num_locations; j++) {
1629                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1630            }
1631        }
1632    }
1633
1634    auto it_a = attribs.begin();
1635    auto it_b = inputs.begin();
1636
1637    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1638        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1639        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1640        auto a_first = a_at_end ? 0 : it_a->first;
1641        auto b_first = b_at_end ? 0 : it_b->first.first;
1642        if (!a_at_end && (b_at_end || a_first < b_first)) {
1643            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1644                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1645                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1646                pass = false;
1647            }
1648            it_a++;
1649        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1650            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1651                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
1652                        b_first)) {
1653                pass = false;
1654            }
1655            it_b++;
1656        } else {
1657            unsigned attrib_type = get_format_type(it_a->second->format);
1658            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1659
1660            /* type checking */
1661            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1662                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1663                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1664                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1665                            string_VkFormat(it_a->second->format), a_first,
1666                            describe_type(vs, it_b->second.type_id).c_str())) {
1667                    pass = false;
1668                }
1669            }
1670
1671            /* OK! */
1672            it_a++;
1673            it_b++;
1674        }
1675    }
1676
1677    return pass;
1678}
1679
1680static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1681                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1682    std::map<location_t, interface_var> outputs;
1683    std::map<uint32_t, VkFormat> color_attachments;
1684    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1685        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1686            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1687        }
1688    }
1689
1690    bool pass = true;
1691
1692    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1693
1694    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);
1695
1696    auto it_a = outputs.begin();
1697    auto it_b = color_attachments.begin();
1698
1699    /* Walk attachment list and outputs together */
1700
1701    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1702        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1703        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1704
1705        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1706            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1707                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1708                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1709                pass = false;
1710            }
1711            it_a++;
1712        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1713            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1714                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1715                pass = false;
1716            }
1717            it_b++;
1718        } else {
1719            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1720            unsigned att_type = get_format_type(it_b->second);
1721
1722            /* type checking */
1723            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1724                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1725                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1726                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1727                            string_VkFormat(it_b->second),
1728                            describe_type(fs, it_a->second.type_id).c_str())) {
1729                    pass = false;
1730                }
1731            }
1732
1733            /* OK! */
1734            it_a++;
1735            it_b++;
1736        }
1737    }
1738
1739    return pass;
1740}
1741
1742/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1743 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1744 * for example.
1745 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1746 *  - NOT the shader input/output interfaces.
1747 *
1748 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1749 * converting parts of this to be generated from the machine-readable spec instead.
1750 */
1751static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1752    std::unordered_set<uint32_t> worklist;
1753    worklist.insert(entrypoint.word(2));
1754
1755    while (!worklist.empty()) {
1756        auto id_iter = worklist.begin();
1757        auto id = *id_iter;
1758        worklist.erase(id_iter);
1759
1760        auto insn = src->get_def(id);
1761        if (insn == src->end()) {
1762            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1763             * across all kinds of things here that we may not care about. */
1764            continue;
1765        }
1766
1767        /* try to add to the output set */
1768        if (!ids.insert(id).second) {
1769            continue; /* if we already saw this id, we don't want to walk it again. */
1770        }
1771
1772        switch (insn.opcode()) {
1773        case spv::OpFunction:
1774            /* scan whole body of the function, enlisting anything interesting */
1775            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1776                switch (insn.opcode()) {
1777                case spv::OpLoad:
1778                case spv::OpAtomicLoad:
1779                case spv::OpAtomicExchange:
1780                case spv::OpAtomicCompareExchange:
1781                case spv::OpAtomicCompareExchangeWeak:
1782                case spv::OpAtomicIIncrement:
1783                case spv::OpAtomicIDecrement:
1784                case spv::OpAtomicIAdd:
1785                case spv::OpAtomicISub:
1786                case spv::OpAtomicSMin:
1787                case spv::OpAtomicUMin:
1788                case spv::OpAtomicSMax:
1789                case spv::OpAtomicUMax:
1790                case spv::OpAtomicAnd:
1791                case spv::OpAtomicOr:
1792                case spv::OpAtomicXor:
1793                    worklist.insert(insn.word(3)); /* ptr */
1794                    break;
1795                case spv::OpStore:
1796                case spv::OpAtomicStore:
1797                    worklist.insert(insn.word(1)); /* ptr */
1798                    break;
1799                case spv::OpAccessChain:
1800                case spv::OpInBoundsAccessChain:
1801                    worklist.insert(insn.word(3)); /* base ptr */
1802                    break;
1803                case spv::OpSampledImage:
1804                case spv::OpImageSampleImplicitLod:
1805                case spv::OpImageSampleExplicitLod:
1806                case spv::OpImageSampleDrefImplicitLod:
1807                case spv::OpImageSampleDrefExplicitLod:
1808                case spv::OpImageSampleProjImplicitLod:
1809                case spv::OpImageSampleProjExplicitLod:
1810                case spv::OpImageSampleProjDrefImplicitLod:
1811                case spv::OpImageSampleProjDrefExplicitLod:
1812                case spv::OpImageFetch:
1813                case spv::OpImageGather:
1814                case spv::OpImageDrefGather:
1815                case spv::OpImageRead:
1816                case spv::OpImage:
1817                case spv::OpImageQueryFormat:
1818                case spv::OpImageQueryOrder:
1819                case spv::OpImageQuerySizeLod:
1820                case spv::OpImageQuerySize:
1821                case spv::OpImageQueryLod:
1822                case spv::OpImageQueryLevels:
1823                case spv::OpImageQuerySamples:
1824                case spv::OpImageSparseSampleImplicitLod:
1825                case spv::OpImageSparseSampleExplicitLod:
1826                case spv::OpImageSparseSampleDrefImplicitLod:
1827                case spv::OpImageSparseSampleDrefExplicitLod:
1828                case spv::OpImageSparseSampleProjImplicitLod:
1829                case spv::OpImageSparseSampleProjExplicitLod:
1830                case spv::OpImageSparseSampleProjDrefImplicitLod:
1831                case spv::OpImageSparseSampleProjDrefExplicitLod:
1832                case spv::OpImageSparseFetch:
1833                case spv::OpImageSparseGather:
1834                case spv::OpImageSparseDrefGather:
1835                case spv::OpImageTexelPointer:
1836                    worklist.insert(insn.word(3)); /* image or sampled image */
1837                    break;
1838                case spv::OpImageWrite:
1839                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1840                    break;
1841                case spv::OpFunctionCall:
1842                    for (uint32_t i = 3; i < insn.len(); i++) {
1843                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1844                    }
1845                    break;
1846
1847                case spv::OpExtInst:
1848                    for (uint32_t i = 5; i < insn.len(); i++) {
1849                        worklist.insert(insn.word(i)); /* operands to ext inst */
1850                    }
1851                    break;
1852                }
1853            }
1854            break;
1855        }
1856    }
1857}
1858
1859static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1860                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1861                                                          shader_module const *src, spirv_inst_iter type,
1862                                                          VkShaderStageFlagBits stage) {
1863    bool pass = true;
1864
1865    /* strip off ptrs etc */
1866    type = get_struct_type(src, type, false);
1867    assert(type != src->end());
1868
1869    /* validate directly off the offsets. this isn't quite correct for arrays
1870     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1871     * sizes */
1872    for (auto insn : *src) {
1873        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1874
1875            if (insn.word(3) == spv::DecorationOffset) {
1876                unsigned offset = insn.word(4);
1877                auto size = 4; /* bytes; TODO: calculate this based on the type */
1878
1879                bool found_range = false;
1880                for (auto const &range : *pushConstantRanges) {
1881                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1882                        found_range = true;
1883
1884                        if ((range.stageFlags & stage) == 0) {
1885                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1886                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1887                                        "Push constant range covering variable starting at "
1888                                        "offset %u not accessible from stage %s",
1889                                        offset, string_VkShaderStageFlagBits(stage))) {
1890                                pass = false;
1891                            }
1892                        }
1893
1894                        break;
1895                    }
1896                }
1897
1898                if (!found_range) {
1899                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1900                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1901                                "Push constant range covering variable starting at "
1902                                "offset %u not declared in layout",
1903                                offset)) {
1904                        pass = false;
1905                    }
1906                }
1907            }
1908        }
1909    }
1910
1911    return pass;
1912}
1913
1914static bool validate_push_constant_usage(debug_report_data *report_data,
1915                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
1916                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
1917    bool pass = true;
1918
1919    for (auto id : accessible_ids) {
1920        auto def_insn = src->get_def(id);
1921        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1922            pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src,
1923                                                                 src->get_def(def_insn.word(1)), stage);
1924        }
1925    }
1926
1927    return pass;
1928}
1929
1930// For given pipelineLayout verify that the set_layout_node at slot.first
1931//  has the requested binding at slot.second and return ptr to that binding
1932static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
1933
1934    if (!pipelineLayout)
1935        return nullptr;
1936
1937    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
1938        return nullptr;
1939
1940    return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
1941}
1942
1943// Block of code at start here for managing/tracking Pipeline state that this layer cares about
1944
1945static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
1946
1947// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
1948//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
1949//   to that same cmd buffer by separate thread are not changing state from underneath us
1950// Track the last cmd buffer touched by this thread
1951
1952static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
1953    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
1954        if (pCB->drawCount[i])
1955            return true;
1956    }
1957    return false;
1958}
1959
1960// Check object status for selected flag state
1961static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
1962                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
1963    if (!(pNode->status & status_mask)) {
1964        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1965                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
1966                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
1967    }
1968    return false;
1969}
1970
1971// Retrieve pipeline node ptr for given pipeline object
1972static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
1973    auto it = my_data->pipelineMap.find(pipeline);
1974    if (it == my_data->pipelineMap.end()) {
1975        return nullptr;
1976    }
1977    return it->second;
1978}
1979
1980static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
1981    auto it = my_data->renderPassMap.find(renderpass);
1982    if (it == my_data->renderPassMap.end()) {
1983        return nullptr;
1984    }
1985    return it->second;
1986}
1987
1988static FRAMEBUFFER_NODE *getFramebuffer(layer_data *my_data, VkFramebuffer framebuffer) {
1989    auto it = my_data->frameBufferMap.find(framebuffer);
1990    if (it == my_data->frameBufferMap.end()) {
1991        return nullptr;
1992    }
1993    return &it->second;
1994}
1995
1996static cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
1997    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
1998    if (it == my_data->descriptorSetLayoutMap.end()) {
1999        return nullptr;
2000    }
2001    return it->second;
2002}
2003
2004static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2005    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2006    if (it == my_data->pipelineLayoutMap.end()) {
2007        return nullptr;
2008    }
2009    return &it->second;
2010}
2011
2012// Return true if for a given PSO, the given state enum is dynamic, else return false
2013static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2014    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2015        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2016            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2017                return true;
2018        }
2019    }
2020    return false;
2021}
2022
2023// Validate state stored as flags at time of draw call
2024static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2025    bool result;
2026    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2027                             "Dynamic viewport state not set for this command buffer");
2028    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2029                              "Dynamic scissor state not set for this command buffer");
2030    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2031        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2032         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2033        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2034                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2035    }
2036    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2037        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2038        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2039                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2040    }
2041    if (pPipe->blendConstantsEnabled) {
2042        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2043                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2044    }
2045    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2046        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2047        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2048                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2049    }
2050    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2051        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2052        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2053                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2054        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2055                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2056        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2057                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2058    }
2059    if (indexedDraw) {
2060        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2061                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2062                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2063    }
2064    return result;
2065}
2066
2067// Verify attachment reference compatibility according to spec
2068//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array much match this
2069//  If both AttachmentReference arrays have requested index, check their corresponding AttachementDescriptions
2070//   to make sure that format and samples counts match.
2071//  If not, they are not compatible.
2072static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2073                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2074                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2075                                             const VkAttachmentDescription *pSecondaryAttachments) {
2076    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2077        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2078            return true;
2079    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2080        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2081            return true;
2082    } else { // format and sample count must match
2083        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2084             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2085            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2086             pSecondaryAttachments[pSecondary[index].attachment].samples))
2087            return true;
2088    }
2089    // Format and sample counts didn't match
2090    return false;
2091}
2092
2093// For give primary and secondary RenderPass objects, verify that they're compatible
2094static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2095                                            string &errorMsg) {
2096    auto primary_render_pass = getRenderPass(my_data, primaryRP);
2097    auto secondary_render_pass = getRenderPass(my_data, secondaryRP);
2098
2099    if (!primary_render_pass) {
2100        stringstream errorStr;
2101        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2102        errorMsg = errorStr.str();
2103        return false;
2104    }
2105
2106    if (!secondary_render_pass) {
2107        stringstream errorStr;
2108        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2109        errorMsg = errorStr.str();
2110        return false;
2111    }
2112    // Trivial pass case is exact same RP
2113    if (primaryRP == secondaryRP) {
2114        return true;
2115    }
2116    const VkRenderPassCreateInfo *primaryRPCI = primary_render_pass->pCreateInfo;
2117    const VkRenderPassCreateInfo *secondaryRPCI = secondary_render_pass->pCreateInfo;
2118    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2119        stringstream errorStr;
2120        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2121                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2122        errorMsg = errorStr.str();
2123        return false;
2124    }
2125    uint32_t spIndex = 0;
2126    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2127        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2128        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2129        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2130        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2131        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2132            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2133                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2134                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2135                stringstream errorStr;
2136                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2137                errorMsg = errorStr.str();
2138                return false;
2139            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2140                                                         primaryColorCount, primaryRPCI->pAttachments,
2141                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2142                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2143                stringstream errorStr;
2144                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2145                errorMsg = errorStr.str();
2146                return false;
2147            }
2148        }
2149
2150        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2151                                              1, primaryRPCI->pAttachments,
2152                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2153                                              1, secondaryRPCI->pAttachments)) {
2154            stringstream errorStr;
2155            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2156            errorMsg = errorStr.str();
2157            return false;
2158        }
2159
2160        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2161        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2162        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2163        for (uint32_t i = 0; i < inputMax; ++i) {
2164            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
2165                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2166                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2167                stringstream errorStr;
2168                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2169                errorMsg = errorStr.str();
2170                return false;
2171            }
2172        }
2173    }
2174    return true;
2175}
2176
2177// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2178// pipelineLayout[layoutIndex]
2179static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2180                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
2181    auto pipeline_layout = getPipelineLayout(my_data, layout);
2182    if (!pipeline_layout) {
2183        stringstream errorStr;
2184        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2185        errorMsg = errorStr.str();
2186        return false;
2187    }
2188    if (layoutIndex >= pipeline_layout->descriptorSetLayouts.size()) {
2189        stringstream errorStr;
2190        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout->descriptorSetLayouts.size()
2191                 << " setLayouts corresponding to sets 0-" << pipeline_layout->descriptorSetLayouts.size() - 1
2192                 << ", but you're attempting to bind set to index " << layoutIndex;
2193        errorMsg = errorStr.str();
2194        return false;
2195    }
2196    auto layout_node = pipeline_layout->setLayouts[layoutIndex];
2197    return pSet->IsCompatible(layout_node, &errorMsg);
2198}
2199
2200// Validate that data for each specialization entry is fully contained within the buffer.
2201static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2202    bool pass = true;
2203
2204    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2205
2206    if (spec) {
2207        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2208            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2209                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2210                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2211                            "Specialization entry %u (for constant id %u) references memory outside provided "
2212                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2213                            " bytes provided)",
2214                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2215                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2216
2217                    pass = false;
2218                }
2219            }
2220        }
2221    }
2222
2223    return pass;
2224}
2225
2226static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2227                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2228    auto type = module->get_def(type_id);
2229
2230    descriptor_count = 1;
2231
2232    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2233     * descriptor count for each dimension. */
2234    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2235        if (type.opcode() == spv::OpTypeArray) {
2236            descriptor_count *= get_constant_value(module, type.word(3));
2237            type = module->get_def(type.word(2));
2238        }
2239        else {
2240            type = module->get_def(type.word(3));
2241        }
2242    }
2243
2244    switch (type.opcode()) {
2245    case spv::OpTypeStruct: {
2246        for (auto insn : *module) {
2247            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2248                if (insn.word(2) == spv::DecorationBlock) {
2249                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2250                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2251                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2252                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2253                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2254                }
2255            }
2256        }
2257
2258        /* Invalid */
2259        return false;
2260    }
2261
2262    case spv::OpTypeSampler:
2263        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2264
2265    case spv::OpTypeSampledImage:
2266        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2267            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2268             * doesn't really have a sampler, and a texel buffer descriptor
2269             * doesn't really provide one. Allow this slight mismatch.
2270             */
2271            auto image_type = module->get_def(type.word(2));
2272            auto dim = image_type.word(3);
2273            auto sampled = image_type.word(7);
2274            return dim == spv::DimBuffer && sampled == 1;
2275        }
2276        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2277
2278    case spv::OpTypeImage: {
2279        /* Many descriptor types backing image types-- depends on dimension
2280         * and whether the image will be used with a sampler. SPIRV for
2281         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2282         * runtime is unacceptable.
2283         */
2284        auto dim = type.word(3);
2285        auto sampled = type.word(7);
2286
2287        if (dim == spv::DimSubpassData) {
2288            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2289        } else if (dim == spv::DimBuffer) {
2290            if (sampled == 1) {
2291                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2292            } else {
2293                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2294            }
2295        } else if (sampled == 1) {
2296            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2297        } else {
2298            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2299        }
2300    }
2301
2302    /* We shouldn't really see any other junk types -- but if we do, they're
2303     * a mismatch.
2304     */
2305    default:
2306        return false; /* Mismatch */
2307    }
2308}
2309
2310static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2311    if (!feature) {
2312        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2313                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2314                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2315                    "enabled on the device",
2316                    feature_name)) {
2317            return false;
2318        }
2319    }
2320
2321    return true;
2322}
2323
2324static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2325                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2326    bool pass = true;
2327
2328
2329    for (auto insn : *src) {
2330        if (insn.opcode() == spv::OpCapability) {
2331            switch (insn.word(1)) {
2332            case spv::CapabilityMatrix:
2333            case spv::CapabilityShader:
2334            case spv::CapabilityInputAttachment:
2335            case spv::CapabilitySampled1D:
2336            case spv::CapabilityImage1D:
2337            case spv::CapabilitySampledBuffer:
2338            case spv::CapabilityImageBuffer:
2339            case spv::CapabilityImageQuery:
2340            case spv::CapabilityDerivativeControl:
2341                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2342                break;
2343
2344            case spv::CapabilityGeometry:
2345                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2346                break;
2347
2348            case spv::CapabilityTessellation:
2349                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2350                break;
2351
2352            case spv::CapabilityFloat64:
2353                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2354                break;
2355
2356            case spv::CapabilityInt64:
2357                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2358                break;
2359
2360            case spv::CapabilityTessellationPointSize:
2361            case spv::CapabilityGeometryPointSize:
2362                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2363                                        "shaderTessellationAndGeometryPointSize");
2364                break;
2365
2366            case spv::CapabilityImageGatherExtended:
2367                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2368                break;
2369
2370            case spv::CapabilityStorageImageMultisample:
2371                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2372                break;
2373
2374            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2375                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2376                                        "shaderUniformBufferArrayDynamicIndexing");
2377                break;
2378
2379            case spv::CapabilitySampledImageArrayDynamicIndexing:
2380                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2381                                        "shaderSampledImageArrayDynamicIndexing");
2382                break;
2383
2384            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2385                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2386                                        "shaderStorageBufferArrayDynamicIndexing");
2387                break;
2388
2389            case spv::CapabilityStorageImageArrayDynamicIndexing:
2390                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2391                                        "shaderStorageImageArrayDynamicIndexing");
2392                break;
2393
2394            case spv::CapabilityClipDistance:
2395                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2396                break;
2397
2398            case spv::CapabilityCullDistance:
2399                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2400                break;
2401
2402            case spv::CapabilityImageCubeArray:
2403                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2404                break;
2405
2406            case spv::CapabilitySampleRateShading:
2407                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2408                break;
2409
2410            case spv::CapabilitySparseResidency:
2411                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2412                break;
2413
2414            case spv::CapabilityMinLod:
2415                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2416                break;
2417
2418            case spv::CapabilitySampledCubeArray:
2419                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2420                break;
2421
2422            case spv::CapabilityImageMSArray:
2423                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2424                break;
2425
2426            case spv::CapabilityStorageImageExtendedFormats:
2427                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2428                                        "shaderStorageImageExtendedFormats");
2429                break;
2430
2431            case spv::CapabilityInterpolationFunction:
2432                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2433                break;
2434
2435            case spv::CapabilityStorageImageReadWithoutFormat:
2436                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2437                                        "shaderStorageImageReadWithoutFormat");
2438                break;
2439
2440            case spv::CapabilityStorageImageWriteWithoutFormat:
2441                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2442                                        "shaderStorageImageWriteWithoutFormat");
2443                break;
2444
2445            case spv::CapabilityMultiViewport:
2446                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2447                break;
2448
2449            default:
2450                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2451                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2452                            "Shader declares capability %u, not supported in Vulkan.",
2453                            insn.word(1)))
2454                    pass = false;
2455                break;
2456            }
2457        }
2458    }
2459
2460    return pass;
2461}
2462
2463static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2464                                           VkPipelineShaderStageCreateInfo const *pStage,
2465                                           PIPELINE_NODE *pipeline,
2466                                           shader_module **out_module,
2467                                           spirv_inst_iter *out_entrypoint,
2468                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2469                                           std::unordered_map<VkShaderModule,
2470                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2471    bool pass = true;
2472    auto module_it = shaderModuleMap.find(pStage->module);
2473    auto module = *out_module = module_it->second.get();
2474    pass &= validate_specialization_offsets(report_data, pStage);
2475
2476    /* find the entrypoint */
2477    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2478    if (entrypoint == module->end()) {
2479        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2480                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2481                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2482                    string_VkShaderStageFlagBits(pStage->stage))) {
2483            pass = false;
2484        }
2485    }
2486
2487    /* validate shader capabilities against enabled device features */
2488    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2489
2490    /* mark accessible ids */
2491    std::unordered_set<uint32_t> accessible_ids;
2492    mark_accessible_ids(module, entrypoint, accessible_ids);
2493
2494    /* validate descriptor set layout against what the entrypoint actually uses */
2495    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2496    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
2497
2498    auto pipelineLayout = pipeline->pipelineLayout;
2499
2500    /* validate push constant usage */
2501    pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges,
2502                                        module, accessible_ids, pStage->stage);
2503
2504    /* validate descriptor use */
2505    for (auto use : descriptor_uses) {
2506        // While validating shaders capture which slots are used by the pipeline
2507        pipeline->active_slots[use.first.first].insert(use.first.second);
2508
2509        /* verify given pipelineLayout has requested setLayout with requested binding */
2510        const auto & binding = get_descriptor_binding(pipelineLayout, use.first);
2511        unsigned required_descriptor_count;
2512
2513        if (!binding) {
2514            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2515                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2516                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2517                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2518                pass = false;
2519            }
2520        } else if (~binding->stageFlags & pStage->stage) {
2521            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2522                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2523                        "Shader uses descriptor slot %u.%u (used "
2524                        "as type `%s`) but descriptor not "
2525                        "accessible from stage %s",
2526                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2527                        string_VkShaderStageFlagBits(pStage->stage))) {
2528                pass = false;
2529            }
2530        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2531                                          /*out*/ required_descriptor_count)) {
2532            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2533                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2534                                                                       "%u.%u (used as type `%s`) but "
2535                                                                       "descriptor of type %s",
2536                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2537                        string_VkDescriptorType(binding->descriptorType))) {
2538                pass = false;
2539            }
2540        } else if (binding->descriptorCount < required_descriptor_count) {
2541            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2542                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2543                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2544                        required_descriptor_count, use.first.first, use.first.second,
2545                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2546                pass = false;
2547            }
2548        }
2549    }
2550
2551    return pass;
2552}
2553
2554
2555// Validate that the shaders used by the given pipeline and store the active_slots
2556//  that are actually used by the pipeline into pPipeline->active_slots
2557static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2558                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2559                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2560    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2561    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2562    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2563
2564    shader_module *shaders[5];
2565    memset(shaders, 0, sizeof(shaders));
2566    spirv_inst_iter entrypoints[5];
2567    memset(entrypoints, 0, sizeof(entrypoints));
2568    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2569    bool pass = true;
2570
2571    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2572        auto pStage = &pCreateInfo->pStages[i];
2573        auto stage_id = get_shader_stage_id(pStage->stage);
2574        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2575                                               &shaders[stage_id], &entrypoints[stage_id],
2576                                               enabledFeatures, shaderModuleMap);
2577    }
2578
2579    vi = pCreateInfo->pVertexInputState;
2580
2581    if (vi) {
2582        pass &= validate_vi_consistency(report_data, vi);
2583    }
2584
2585    if (shaders[vertex_stage]) {
2586        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2587    }
2588
2589    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2590    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2591
2592    while (!shaders[producer] && producer != fragment_stage) {
2593        producer++;
2594        consumer++;
2595    }
2596
2597    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2598        assert(shaders[producer]);
2599        if (shaders[consumer]) {
2600            pass &= validate_interface_between_stages(report_data,
2601                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2602                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2603
2604            producer = consumer;
2605        }
2606    }
2607
2608    if (shaders[fragment_stage] && pPipeline->renderPass) {
2609        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2610                                                        pPipeline->renderPass, pCreateInfo->subpass);
2611    }
2612
2613    return pass;
2614}
2615
2616static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2617                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2618    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2619
2620    shader_module *module;
2621    spirv_inst_iter entrypoint;
2622
2623    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2624                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2625}
2626
2627// Return Set node ptr for specified set or else NULL
2628static cvdescriptorset::DescriptorSet *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2629    if (my_data->setMap.find(set) == my_data->setMap.end()) {
2630        return NULL;
2631    }
2632    return my_data->setMap[set];
2633}
2634// For the given command buffer, verify and update the state for activeSetBindingsPairs
2635//  This includes:
2636//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2637//     To be valid, the dynamic offset combined with the offset and range from its
2638//     descriptor update must not overflow the size of its buffer being updated
2639//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2640//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2641static bool validate_and_update_drawtime_descriptor_state(
2642    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2643    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>,
2644                            std::vector<uint32_t> const *>> &activeSetBindingsPairs) {
2645    bool result = false;
2646    for (auto set_bindings_pair : activeSetBindingsPairs) {
2647        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2648        std::string err_str;
2649        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2650                                         &err_str)) {
2651            // Report error here
2652            auto set = set_node->GetSet();
2653            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2654                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2655                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s",
2656                              reinterpret_cast<const uint64_t &>(set), err_str.c_str());
2657        }
2658        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2659    }
2660    return result;
2661}
2662// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
2663//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
2664//   function can be killed and validate_and_update_draw_state() used instead
2665static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
2666    // For the bound descriptor sets, pull off any storage images and buffers
2667    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
2668    //  pipelines
2669    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
2670        set->GetAllStorageUpdates(&pCB->updateBuffers, &pCB->updateImages);
2671    }
2672}
2673
2674// For given pipeline, return number of MSAA samples, or one if MSAA disabled
2675static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2676    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2677        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2678        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2679    }
2680    return VK_SAMPLE_COUNT_1_BIT;
2681}
2682
2683// Validate draw-time state related to the PSO
2684static bool validatePipelineDrawtimeState(layer_data const *my_data, const GLOBAL_CB_NODE *pCB,
2685                                          const VkPipelineBindPoint pipelineBindPoint, PIPELINE_NODE const *pPipeline) {
2686    bool skip_call = false;
2687    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
2688        // Verify that any MSAA request in PSO matches sample# in bound FB
2689        // Skip the check if rasterization is disabled.
2690        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2691            (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2692            VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2693            if (pCB->activeRenderPass) {
2694                const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
2695                const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2696                VkSampleCountFlagBits subpass_num_samples = VkSampleCountFlagBits(0);
2697                uint32_t i;
2698
2699                const VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2700                if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2701                    (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2702                    skip_call |=
2703                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2704                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2705                                "Render pass subpass %u mismatch with blending state defined and blend state attachment "
2706                                "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
2707                                "must be the same at draw-time.",
2708                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2709                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2710                }
2711
2712                for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2713                    VkSampleCountFlagBits samples;
2714
2715                    if (subpass_desc->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
2716                        continue;
2717
2718                    samples = render_pass_info->pAttachments[subpass_desc->pColorAttachments[i].attachment].samples;
2719                    if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0)) {
2720                        subpass_num_samples = samples;
2721                    } else if (subpass_num_samples != samples) {
2722                        subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2723                        break;
2724                    }
2725                }
2726                if ((subpass_desc->pDepthStencilAttachment != NULL) &&
2727                    (subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
2728                    const VkSampleCountFlagBits samples =
2729                        render_pass_info->pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples;
2730                    if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0))
2731                        subpass_num_samples = samples;
2732                    else if (subpass_num_samples != samples)
2733                        subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2734                }
2735
2736                if (((subpass_desc->colorAttachmentCount > 0) || (subpass_desc->pDepthStencilAttachment != NULL)) &&
2737                    (pso_num_samples != subpass_num_samples)) {
2738                    skip_call |=
2739                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2740                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2741                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
2742                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
2743                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2744                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2745                }
2746            } else {
2747                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2748                                     reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2749                                     "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2750                                     reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2751            }
2752        }
2753        // TODO : Add more checks here
2754    } else {
2755        // TODO : Validate non-gfx pipeline updates
2756    }
2757    return skip_call;
2758}
2759
2760// Validate overall state at the time of a draw call
2761static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2762                                           const VkPipelineBindPoint bindPoint) {
2763    bool result = false;
2764    auto const &state = pCB->lastBound[bindPoint];
2765    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2766    if (nullptr == pPipe) {
2767        result |= log_msg(
2768            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2769            DRAWSTATE_INVALID_PIPELINE, "DS",
2770            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2771        // Early return as any further checks below will be busted w/o a pipeline
2772        if (result)
2773            return true;
2774    }
2775    // First check flag states
2776    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2777        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2778    else {
2779        // First block of code below to validate active sets should eventually
2780        //  work for the compute case but currently doesn't so return early for now
2781        // TODO : When active sets in compute shaders are correctly parsed,
2782        //  stop returning early here and handle them in top block below
2783        return result;
2784    }
2785
2786    // Now complete other state checks
2787    // TODO : When Compute shaders are properly parsed, fix this section to validate them as well
2788    if (state.pipelineLayout) {
2789        string errorString;
2790        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2791        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
2792        for (auto & setBindingPair : pPipe->active_slots) {
2793            uint32_t setIndex = setBindingPair.first;
2794            // If valid set is not bound throw an error
2795            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2796                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2797                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2798                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
2799                                  setIndex);
2800            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex],
2801                                                        pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2802                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2803                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2804                result |=
2805                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2806                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2807                            "VkDescriptorSet (0x%" PRIxLEAST64
2808                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2809                            (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
2810            } else { // Valid set is bound and layout compatible, validate that it's updated
2811                // Pull the set node
2812                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
2813                // Save vector of all active sets to verify dynamicOffsets below
2814                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
2815                                                                 &state.dynamicOffsets[setIndex]));
2816                // Make sure set has been updated if it has no immutable samplers
2817                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2818                if (!pSet->IsUpdated()) {
2819                    for (auto binding : setBindingPair.second) {
2820                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
2821                            result |= log_msg(
2822                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2823                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2824                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2825                                "this will result in undefined behavior.",
2826                                (uint64_t)pSet->GetSet());
2827                        }
2828                    }
2829                }
2830            }
2831        }
2832        // For given active slots, verify any dynamic descriptors and record updated images & buffers
2833        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2834    }
2835    // TODO : If/when compute pipelines/shaders are handled above, code below is only for gfx bind poing
2836    //if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
2837    // Verify Vtx binding
2838    if (pPipe->vertexBindingDescriptions.size() > 0) {
2839        for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
2840            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2841                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2842                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2843                                  "The Pipeline State Object (0x%" PRIxLEAST64
2844                                  ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
2845                                  " should be set via vkCmdBindVertexBuffers.",
2846                                  (uint64_t)state.pipeline, i);
2847            }
2848        }
2849    } else {
2850        if (!pCB->currentDrawData.buffers.empty()) {
2851            result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2852                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2853                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2854                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2855                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2856        }
2857    }
2858    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2859    // Skip check if rasterization is disabled or there is no viewport.
2860    if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
2861         (pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2862        pPipe->graphicsPipelineCI.pViewportState) {
2863        bool dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
2864        bool dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
2865        if (dynViewport) {
2866            if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
2867                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2868                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2869                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2870                                  ", but PSO viewportCount is %u. These counts must match.",
2871                                  pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
2872            }
2873        }
2874        if (dynScissor) {
2875            if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
2876                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2877                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2878                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2879                                  ", but PSO scissorCount is %u. These counts must match.",
2880                                  pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
2881            }
2882        }
2883    }
2884    //} // end of "if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {" block
2885
2886    // Check general pipeline state that needs to be validated at drawtime
2887    result |= validatePipelineDrawtimeState(my_data, pCB, bindPoint, pPipe);
2888
2889    return result;
2890}
2891
2892// Validate HW line width capabilities prior to setting requested line width.
2893static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
2894    bool skip_call = false;
2895
2896    // First check to see if the physical device supports wide lines.
2897    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
2898        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
2899                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
2900                                            "not supported/enabled so lineWidth must be 1.0f!",
2901                             lineWidth);
2902    } else {
2903        // Otherwise, make sure the width falls in the valid range.
2904        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
2905            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
2906            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
2907                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
2908                                                          "to between [%f, %f]!",
2909                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
2910                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
2911        }
2912    }
2913
2914    return skip_call;
2915}
2916
2917// Verify that create state for a pipeline is valid
2918static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
2919                                      int pipelineIndex) {
2920    bool skipCall = false;
2921
2922    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
2923
2924    // If create derivative bit is set, check that we've specified a base
2925    // pipeline correctly, and that the base pipeline was created to allow
2926    // derivatives.
2927    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
2928        PIPELINE_NODE *pBasePipeline = nullptr;
2929        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
2930              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
2931            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2932                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2933                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
2934        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
2935            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
2936                skipCall |=
2937                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2938                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2939                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
2940            } else {
2941                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
2942            }
2943        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
2944            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
2945        }
2946
2947        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
2948            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2949                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2950                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
2951        }
2952    }
2953
2954    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
2955        if (!my_data->phys_dev_properties.features.independentBlend) {
2956            if (pPipeline->attachments.size() > 1) {
2957                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
2958                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
2959                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
2960                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
2961                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
2962                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
2963                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
2964                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
2965                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
2966                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
2967                        skipCall |=
2968                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2969                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
2970                            "enabled, all elements of pAttachments must be identical");
2971                    }
2972                }
2973            }
2974        }
2975        if (!my_data->phys_dev_properties.features.logicOp &&
2976            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
2977            skipCall |=
2978                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2979                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
2980                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
2981        }
2982        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
2983            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
2984             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
2985            skipCall |=
2986                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2987                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
2988                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
2989        }
2990    }
2991
2992    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
2993    // produces nonsense errors that confuse users. Other layers should already
2994    // emit errors for renderpass being invalid.
2995    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
2996    if (renderPass &&
2997        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
2998        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2999                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3000                                                                           "is out of range for this renderpass (0..%u)",
3001                            pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
3002    }
3003
3004    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
3005                                                    my_data->shaderModuleMap)) {
3006        skipCall = true;
3007    }
3008    // Each shader's stage must be unique
3009    if (pPipeline->duplicate_shaders) {
3010        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3011            if (pPipeline->duplicate_shaders & stage) {
3012                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3013                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3014                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3015                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3016            }
3017        }
3018    }
3019    // VS is required
3020    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3021        skipCall |=
3022            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3023                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3024    }
3025    // Either both or neither TC/TE shaders should be defined
3026    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3027        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3028        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3029                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3030                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3031    }
3032    // Compute shaders should be specified independent of Gfx shaders
3033    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3034        (pPipeline->active_shaders &
3035         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3036          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3037        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3038                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3039                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3040    }
3041    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3042    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3043    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3044        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3045         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3046        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3047                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3048                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3049                                                                           "topology for tessellation pipelines");
3050    }
3051    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3052        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3053        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3054            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3055                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3056                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3057                                                                               "topology is only valid for tessellation pipelines");
3058        }
3059        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3060            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3061                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3062                                "Invalid Pipeline CreateInfo State: "
3063                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3064                                "topology used. pTessellationState must not be NULL in this case.");
3065        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3066                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3067            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3068                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3069                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3070                                                                               "topology used with patchControlPoints value %u."
3071                                                                               " patchControlPoints should be >0 and <=32.",
3072                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3073        }
3074    }
3075    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3076    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3077        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3078            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3079                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3080        }
3081    }
3082    // Viewport state must be included if rasterization is enabled.
3083    // If the viewport state is included, the viewport and scissor counts should always match.
3084    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3085    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3086        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3087        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3088            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3089                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3090                                                                           "and scissors are dynamic PSO must include "
3091                                                                           "viewportCount and scissorCount in pViewportState.");
3092        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3093                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3094            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3095                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3096                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3097                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3098                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3099        } else {
3100            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3101            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3102            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3103            if (!dynViewport) {
3104                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3105                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3106                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3107                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3108                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3109                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3110                                        "vkCmdSetViewport().",
3111                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3112                }
3113            }
3114            if (!dynScissor) {
3115                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3116                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3117                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3118                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3119                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3120                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3121                                        "vkCmdSetScissor().",
3122                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3123                }
3124            }
3125        }
3126    }
3127    return skipCall;
3128}
3129
3130// Free the Pipeline nodes
3131static void deletePipelines(layer_data *my_data) {
3132    if (my_data->pipelineMap.size() <= 0)
3133        return;
3134    for (auto &pipe_map_pair : my_data->pipelineMap) {
3135        delete pipe_map_pair.second;
3136    }
3137    my_data->pipelineMap.clear();
3138}
3139
3140// Block of code at start here specifically for managing/tracking DSs
3141
3142// Return Pool node ptr for specified pool or else NULL
3143static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3144    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3145        return NULL;
3146    }
3147    return my_data->descriptorPoolMap[pool];
3148}
3149
3150// Return false if update struct is of valid type, otherwise flag error and return code from callback
3151static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3152    switch (pUpdateStruct->sType) {
3153    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3154    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3155        return false;
3156    default:
3157        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3158                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3159                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3160                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3161    }
3162}
3163
3164// Set count for given update struct in the last parameter
3165static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3166    switch (pUpdateStruct->sType) {
3167    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3168        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3169    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3170        // TODO : Need to understand this case better and make sure code is correct
3171        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3172    default:
3173        return 0;
3174    }
3175}
3176
3177// For given layout and update, return the first overall index of the layout that is updated
3178static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3179                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3180    return binding_start_index + arrayIndex;
3181}
3182// For given layout and update, return the last overall index of the layout that is updated
3183static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3184                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3185    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3186    return binding_start_index + arrayIndex + count - 1;
3187}
3188// Verify that the descriptor type in the update struct matches what's expected by the layout
3189static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3190                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3191    // First get actual type of update
3192    bool skipCall = false;
3193    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3194    switch (pUpdateStruct->sType) {
3195    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3196        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3197        break;
3198    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3199        /* no need to validate */
3200        return false;
3201        break;
3202    default:
3203        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3204                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3205                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3206                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3207    }
3208    if (!skipCall) {
3209        if (layout_type != actualType) {
3210            skipCall |= log_msg(
3211                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3212                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3213                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3214                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3215        }
3216    }
3217    return skipCall;
3218}
3219//TODO: Consolidate functions
3220bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3221    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3222    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3223        return false;
3224    }
3225    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3226    imgpair.subresource.aspectMask = aspectMask;
3227    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3228    if (imgsubIt == pCB->imageLayoutMap.end()) {
3229        return false;
3230    }
3231    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3232        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3233                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3234                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3235                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3236    }
3237    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3238        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3239                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3240                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3241                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3242    }
3243    node = imgsubIt->second;
3244    return true;
3245}
3246
3247bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3248    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3249        return false;
3250    }
3251    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3252    imgpair.subresource.aspectMask = aspectMask;
3253    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3254    if (imgsubIt == my_data->imageLayoutMap.end()) {
3255        return false;
3256    }
3257    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3258        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3259                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3260                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3261                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3262    }
3263    layout = imgsubIt->second.layout;
3264    return true;
3265}
3266
3267// find layout(s) on the cmd buf level
3268bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3269    ImageSubresourcePair imgpair = {image, true, range};
3270    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3271    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3272    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3273    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3274    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3275    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3276        imgpair = {image, false, VkImageSubresource()};
3277        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3278        if (imgsubIt == pCB->imageLayoutMap.end())
3279            return false;
3280        node = imgsubIt->second;
3281    }
3282    return true;
3283}
3284
3285// find layout(s) on the global level
3286bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3287    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3288    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3289    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3290    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3291    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3292    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3293        imgpair = {imgpair.image, false, VkImageSubresource()};
3294        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3295        if (imgsubIt == my_data->imageLayoutMap.end())
3296            return false;
3297        layout = imgsubIt->second.layout;
3298    }
3299    return true;
3300}
3301
3302bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3303    ImageSubresourcePair imgpair = {image, true, range};
3304    return FindLayout(my_data, imgpair, layout);
3305}
3306
3307bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3308    auto sub_data = my_data->imageSubresourceMap.find(image);
3309    if (sub_data == my_data->imageSubresourceMap.end())
3310        return false;
3311    auto imgIt = my_data->imageMap.find(image);
3312    if (imgIt == my_data->imageMap.end())
3313        return false;
3314    bool ignoreGlobal = false;
3315    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
3316    // potential errors in this case.
3317    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3318        ignoreGlobal = true;
3319    }
3320    for (auto imgsubpair : sub_data->second) {
3321        if (ignoreGlobal && !imgsubpair.hasSubresource)
3322            continue;
3323        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3324        if (img_data != my_data->imageLayoutMap.end()) {
3325            layouts.push_back(img_data->second.layout);
3326        }
3327    }
3328    return true;
3329}
3330
3331// Set the layout on the global level
3332void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3333    VkImage &image = imgpair.image;
3334    // TODO (mlentine): Maybe set format if new? Not used atm.
3335    my_data->imageLayoutMap[imgpair].layout = layout;
3336    // TODO (mlentine): Maybe make vector a set?
3337    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3338    if (subresource == my_data->imageSubresourceMap[image].end()) {
3339        my_data->imageSubresourceMap[image].push_back(imgpair);
3340    }
3341}
3342
3343// Set the layout on the cmdbuf level
3344void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3345    pCB->imageLayoutMap[imgpair] = node;
3346    // TODO (mlentine): Maybe make vector a set?
3347    auto subresource =
3348        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3349    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3350        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3351    }
3352}
3353
3354void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3355    // TODO (mlentine): Maybe make vector a set?
3356    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3357        pCB->imageSubresourceMap[imgpair.image].end()) {
3358        pCB->imageLayoutMap[imgpair].layout = layout;
3359    } else {
3360        // TODO (mlentine): Could be expensive and might need to be removed.
3361        assert(imgpair.hasSubresource);
3362        IMAGE_CMD_BUF_LAYOUT_NODE node;
3363        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3364            node.initialLayout = layout;
3365        }
3366        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3367    }
3368}
3369
3370template <class OBJECT, class LAYOUT>
3371void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3372    if (imgpair.subresource.aspectMask & aspectMask) {
3373        imgpair.subresource.aspectMask = aspectMask;
3374        SetLayout(pObject, imgpair, layout);
3375    }
3376}
3377
3378template <class OBJECT, class LAYOUT>
3379void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3380    ImageSubresourcePair imgpair = {image, true, range};
3381    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3382    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3383    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3384    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3385}
3386
3387template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3388    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3389    SetLayout(pObject, image, imgpair, layout);
3390}
3391
3392void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3393    auto image_view_data = dev_data->imageViewMap.find(imageView);
3394    assert(image_view_data != dev_data->imageViewMap.end());
3395    const VkImage &image = image_view_data->second.image;
3396    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3397    // TODO: Do not iterate over every possibility - consolidate where possible
3398    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3399        uint32_t level = subRange.baseMipLevel + j;
3400        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3401            uint32_t layer = subRange.baseArrayLayer + k;
3402            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3403            SetLayout(pCB, image, sub, layout);
3404        }
3405    }
3406}
3407
3408// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3409// func_str is the name of the calling function
3410// Return false if no errors occur
3411// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3412static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3413    bool skip_call = false;
3414    auto set_node = my_data->setMap.find(set);
3415    if (set_node == my_data->setMap.end()) {
3416        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3417                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3418                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3419                             (uint64_t)(set));
3420    } else {
3421        if (set_node->second->in_use.load()) {
3422            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3423                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3424                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
3425                                 func_str.c_str(), (uint64_t)(set));
3426        }
3427    }
3428    return skip_call;
3429}
3430
3431// Free the descriptor set, remove it from setMap and invalidate any cmd buffers that it was bound to
3432static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3433    dev_data->setMap.erase(descriptor_set->GetSet());
3434    delete descriptor_set;
3435}
3436// Free all DS Pools including their Sets & related sub-structs
3437// NOTE : Calls to this function should be wrapped in mutex
3438static void deletePools(layer_data *my_data) {
3439    if (my_data->descriptorPoolMap.size() <= 0)
3440        return;
3441    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3442        // Remove this pools' sets from setMap and delete them
3443        for (auto ds : (*ii).second->sets) {
3444            freeDescriptorSet(my_data, ds);
3445        }
3446        (*ii).second->sets.clear();
3447    }
3448    my_data->descriptorPoolMap.clear();
3449}
3450
3451static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3452                                VkDescriptorPoolResetFlags flags) {
3453    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3454    if (!pPool) {
3455        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3456                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
3457                "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
3458    } else {
3459        // TODO: validate flags
3460        // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3461        for (auto ds : pPool->sets) {
3462            freeDescriptorSet(my_data, ds);
3463        }
3464        pPool->sets.clear();
3465        // Reset available count for each type and available sets for this pool
3466        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3467            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3468        }
3469        pPool->availableSets = pPool->maxSets;
3470    }
3471}
3472
3473// For given CB object, fetch associated CB Node from map
3474static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3475    auto it = my_data->commandBufferMap.find(cb);
3476    if (it == my_data->commandBufferMap.end()) {
3477        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3478                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3479                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3480        return NULL;
3481    }
3482    return it->second;
3483}
3484// Free all CB Nodes
3485// NOTE : Calls to this function should be wrapped in mutex
3486static void deleteCommandBuffers(layer_data *my_data) {
3487    if (my_data->commandBufferMap.empty()) {
3488        return;
3489    }
3490    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3491        delete (*ii).second;
3492    }
3493    my_data->commandBufferMap.clear();
3494}
3495
3496static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3497    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3498                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3499                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3500}
3501
3502bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3503    if (!pCB->activeRenderPass)
3504        return false;
3505    bool skip_call = false;
3506    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3507        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3508        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3509                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3510                             "Commands cannot be called in a subpass using secondary command buffers.");
3511    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3512        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3513                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3514                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3515    }
3516    return skip_call;
3517}
3518
3519static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3520    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3521        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3522                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3523                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3524    return false;
3525}
3526
3527static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3528    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3529        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3530                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3531                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3532    return false;
3533}
3534
3535static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3536    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3537        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3538                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3539                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3540    return false;
3541}
3542
3543// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3544//  in the recording state or if there's an issue with the Cmd ordering
3545static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3546    bool skipCall = false;
3547    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
3548    if (pool_data != my_data->commandPoolMap.end()) {
3549        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
3550        switch (cmd) {
3551        case CMD_BINDPIPELINE:
3552        case CMD_BINDPIPELINEDELTA:
3553        case CMD_BINDDESCRIPTORSETS:
3554        case CMD_FILLBUFFER:
3555        case CMD_CLEARCOLORIMAGE:
3556        case CMD_SETEVENT:
3557        case CMD_RESETEVENT:
3558        case CMD_WAITEVENTS:
3559        case CMD_BEGINQUERY:
3560        case CMD_ENDQUERY:
3561        case CMD_RESETQUERYPOOL:
3562        case CMD_COPYQUERYPOOLRESULTS:
3563        case CMD_WRITETIMESTAMP:
3564            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3565            break;
3566        case CMD_SETVIEWPORTSTATE:
3567        case CMD_SETSCISSORSTATE:
3568        case CMD_SETLINEWIDTHSTATE:
3569        case CMD_SETDEPTHBIASSTATE:
3570        case CMD_SETBLENDSTATE:
3571        case CMD_SETDEPTHBOUNDSSTATE:
3572        case CMD_SETSTENCILREADMASKSTATE:
3573        case CMD_SETSTENCILWRITEMASKSTATE:
3574        case CMD_SETSTENCILREFERENCESTATE:
3575        case CMD_BINDINDEXBUFFER:
3576        case CMD_BINDVERTEXBUFFER:
3577        case CMD_DRAW:
3578        case CMD_DRAWINDEXED:
3579        case CMD_DRAWINDIRECT:
3580        case CMD_DRAWINDEXEDINDIRECT:
3581        case CMD_BLITIMAGE:
3582        case CMD_CLEARATTACHMENTS:
3583        case CMD_CLEARDEPTHSTENCILIMAGE:
3584        case CMD_RESOLVEIMAGE:
3585        case CMD_BEGINRENDERPASS:
3586        case CMD_NEXTSUBPASS:
3587        case CMD_ENDRENDERPASS:
3588            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3589            break;
3590        case CMD_DISPATCH:
3591        case CMD_DISPATCHINDIRECT:
3592            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3593            break;
3594        case CMD_COPYBUFFER:
3595        case CMD_COPYIMAGE:
3596        case CMD_COPYBUFFERTOIMAGE:
3597        case CMD_COPYIMAGETOBUFFER:
3598        case CMD_CLONEIMAGEDATA:
3599        case CMD_UPDATEBUFFER:
3600        case CMD_PIPELINEBARRIER:
3601        case CMD_EXECUTECOMMANDS:
3602        case CMD_END:
3603            break;
3604        default:
3605            break;
3606        }
3607    }
3608    if (pCB->state != CB_RECORDING) {
3609        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3610    } else {
3611        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3612        CMD_NODE cmdNode = {};
3613        // init cmd node and append to end of cmd LL
3614        cmdNode.cmdNumber = ++pCB->numCmds;
3615        cmdNode.type = cmd;
3616        pCB->cmds.push_back(cmdNode);
3617    }
3618    return skipCall;
3619}
3620// Reset the command buffer state
3621//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3622static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3623    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3624    if (pCB) {
3625        pCB->in_use.store(0);
3626        pCB->cmds.clear();
3627        // Reset CB state (note that createInfo is not cleared)
3628        pCB->commandBuffer = cb;
3629        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3630        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3631        pCB->numCmds = 0;
3632        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3633        pCB->state = CB_NEW;
3634        pCB->submitCount = 0;
3635        pCB->status = 0;
3636        pCB->viewports.clear();
3637        pCB->scissors.clear();
3638
3639        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3640            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
3641            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
3642                set->RemoveBoundCommandBuffer(pCB);
3643            }
3644            pCB->lastBound[i].reset();
3645        }
3646
3647        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3648        pCB->activeRenderPass = nullptr;
3649        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3650        pCB->activeSubpass = 0;
3651        pCB->lastSubmittedFence = VK_NULL_HANDLE;
3652        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
3653        pCB->destroyedSets.clear();
3654        pCB->updatedSets.clear();
3655        pCB->destroyedFramebuffers.clear();
3656        pCB->waitedEvents.clear();
3657        pCB->semaphores.clear();
3658        pCB->events.clear();
3659        pCB->waitedEventsBeforeQueryReset.clear();
3660        pCB->queryToStateMap.clear();
3661        pCB->activeQueries.clear();
3662        pCB->startedQueries.clear();
3663        pCB->imageSubresourceMap.clear();
3664        pCB->imageLayoutMap.clear();
3665        pCB->eventToStageMap.clear();
3666        pCB->drawData.clear();
3667        pCB->currentDrawData.buffers.clear();
3668        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3669        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3670        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3671            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3672        }
3673        pCB->secondaryCommandBuffers.clear();
3674        pCB->updateImages.clear();
3675        pCB->updateBuffers.clear();
3676        clear_cmd_buf_and_mem_references(dev_data, pCB);
3677        pCB->eventUpdates.clear();
3678        pCB->queryUpdates.clear();
3679
3680        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3681        for (auto framebuffer : pCB->framebuffers) {
3682            auto fbNode = getFramebuffer(dev_data, framebuffer);
3683            if (fbNode)
3684                fbNode->referencingCmdBuffers.erase(pCB->commandBuffer);
3685        }
3686        pCB->framebuffers.clear();
3687        pCB->activeFramebuffer = VK_NULL_HANDLE;
3688    }
3689}
3690
3691// Set PSO-related status bits for CB, including dynamic state set via PSO
3692static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
3693    // Account for any dynamic state not set via this PSO
3694    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3695        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
3696        pCB->status = CBSTATUS_ALL;
3697    } else {
3698        // First consider all state on
3699        // Then unset any state that's noted as dynamic in PSO
3700        // Finally OR that into CB statemask
3701        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
3702        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3703            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3704            case VK_DYNAMIC_STATE_VIEWPORT:
3705                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
3706                break;
3707            case VK_DYNAMIC_STATE_SCISSOR:
3708                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
3709                break;
3710            case VK_DYNAMIC_STATE_LINE_WIDTH:
3711                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3712                break;
3713            case VK_DYNAMIC_STATE_DEPTH_BIAS:
3714                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3715                break;
3716            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3717                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3718                break;
3719            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3720                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3721                break;
3722            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3723                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3724                break;
3725            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3726                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3727                break;
3728            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3729                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3730                break;
3731            default:
3732                // TODO : Flag error here
3733                break;
3734            }
3735        }
3736        pCB->status |= psoDynStateMask;
3737    }
3738}
3739
3740// Print the last bound Gfx Pipeline
3741static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
3742    bool skipCall = false;
3743    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3744    if (pCB) {
3745        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
3746        if (!pPipeTrav) {
3747            // nothing to print
3748        } else {
3749            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3750                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
3751                                vk_print_vkgraphicspipelinecreateinfo(
3752                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
3753                                    .c_str());
3754        }
3755    }
3756    return skipCall;
3757}
3758
3759static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
3760    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3761    if (pCB && pCB->cmds.size() > 0) {
3762        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3763                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
3764        vector<CMD_NODE> cmds = pCB->cmds;
3765        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
3766            // TODO : Need to pass cb as srcObj here
3767            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
3768                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
3769        }
3770    } else {
3771        // Nothing to print
3772    }
3773}
3774
3775static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
3776    bool skipCall = false;
3777    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
3778        return skipCall;
3779    }
3780    skipCall |= printPipeline(my_data, cb);
3781    return skipCall;
3782}
3783
3784// Flags validation error if the associated call is made inside a render pass. The apiName
3785// routine should ONLY be called outside a render pass.
3786static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3787    bool inside = false;
3788    if (pCB->activeRenderPass) {
3789        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3790                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
3791                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
3792                         (uint64_t)pCB->activeRenderPass->renderPass);
3793    }
3794    return inside;
3795}
3796
3797// Flags validation error if the associated call is made outside a render pass. The apiName
3798// routine should ONLY be called inside a render pass.
3799static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3800    bool outside = false;
3801    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3802        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3803         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3804        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3805                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
3806                          "%s: This call must be issued inside an active render pass.", apiName);
3807    }
3808    return outside;
3809}
3810
3811static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3812
3813    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3814
3815}
3816
3817VKAPI_ATTR VkResult VKAPI_CALL
3818CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
3819    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3820
3821    assert(chain_info->u.pLayerInfo);
3822    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3823    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3824    if (fpCreateInstance == NULL)
3825        return VK_ERROR_INITIALIZATION_FAILED;
3826
3827    // Advance the link info for the next element on the chain
3828    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3829
3830    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3831    if (result != VK_SUCCESS)
3832        return result;
3833
3834    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
3835    instance_data->instance = *pInstance;
3836    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
3837    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
3838
3839    instance_data->report_data =
3840        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
3841                                     pCreateInfo->ppEnabledExtensionNames);
3842
3843    init_core_validation(instance_data, pAllocator);
3844
3845    ValidateLayerOrdering(*pCreateInfo);
3846
3847    return result;
3848}
3849
3850/* hook DestroyInstance to remove tableInstanceMap entry */
3851VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3852    // TODOSC : Shouldn't need any customization here
3853    dispatch_key key = get_dispatch_key(instance);
3854    // TBD: Need any locking this early, in case this function is called at the
3855    // same time by more than one thread?
3856    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
3857    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
3858    pTable->DestroyInstance(instance, pAllocator);
3859
3860    std::lock_guard<std::mutex> lock(global_lock);
3861    // Clean up logging callback, if any
3862    while (my_data->logging_callback.size() > 0) {
3863        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
3864        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
3865        my_data->logging_callback.pop_back();
3866    }
3867
3868    layer_debug_report_destroy_instance(my_data->report_data);
3869    delete my_data->instance_dispatch_table;
3870    layer_data_map.erase(key);
3871}
3872
3873static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3874    uint32_t i;
3875    // TBD: Need any locking, in case this function is called at the same time
3876    // by more than one thread?
3877    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
3878    dev_data->device_extensions.wsi_enabled = false;
3879
3880    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
3881    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
3882    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
3883    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
3884    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
3885    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
3886    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
3887
3888    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3889        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
3890            dev_data->device_extensions.wsi_enabled = true;
3891    }
3892}
3893
3894VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
3895                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
3896    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
3897    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3898
3899    assert(chain_info->u.pLayerInfo);
3900    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3901    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
3902    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
3903    if (fpCreateDevice == NULL) {
3904        return VK_ERROR_INITIALIZATION_FAILED;
3905    }
3906
3907    // Advance the link info for the next element on the chain
3908    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3909
3910    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
3911    if (result != VK_SUCCESS) {
3912        return result;
3913    }
3914
3915    std::unique_lock<std::mutex> lock(global_lock);
3916    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
3917
3918    // Setup device dispatch table
3919    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
3920    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
3921    my_device_data->device = *pDevice;
3922
3923    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
3924    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
3925    // Get physical device limits for this device
3926    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
3927    uint32_t count;
3928    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
3929    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
3930    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
3931        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
3932    // TODO: device limits should make sure these are compatible
3933    if (pCreateInfo->pEnabledFeatures) {
3934        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
3935    } else {
3936        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
3937    }
3938    // Store physical device mem limits into device layer_data struct
3939    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
3940    lock.unlock();
3941
3942    ValidateLayerOrdering(*pCreateInfo);
3943
3944    return result;
3945}
3946
3947// prototype
3948static void deleteRenderPasses(layer_data *);
3949VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
3950    // TODOSC : Shouldn't need any customization here
3951    dispatch_key key = get_dispatch_key(device);
3952    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
3953    // Free all the memory
3954    std::unique_lock<std::mutex> lock(global_lock);
3955    deletePipelines(dev_data);
3956    deleteRenderPasses(dev_data);
3957    deleteCommandBuffers(dev_data);
3958    // This will also delete all sets in the pool & remove them from setMap
3959    deletePools(dev_data);
3960    // All sets should be removed
3961    assert(dev_data->setMap.empty());
3962    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
3963        delete del_layout.second;
3964    }
3965    dev_data->descriptorSetLayoutMap.clear();
3966    dev_data->imageViewMap.clear();
3967    dev_data->imageMap.clear();
3968    dev_data->imageSubresourceMap.clear();
3969    dev_data->imageLayoutMap.clear();
3970    dev_data->bufferViewMap.clear();
3971    dev_data->bufferMap.clear();
3972    // Queues persist until device is destroyed
3973    dev_data->queueMap.clear();
3974    lock.unlock();
3975#if MTMERGESOURCE
3976    bool skipCall = false;
3977    lock.lock();
3978    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
3979            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
3980    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
3981            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
3982    print_mem_list(dev_data);
3983    printCBList(dev_data);
3984    // Report any memory leaks
3985    DEVICE_MEM_INFO *pInfo = NULL;
3986    if (!dev_data->memObjMap.empty()) {
3987        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
3988            pInfo = &(*ii).second;
3989            if (pInfo->allocInfo.allocationSize != 0) {
3990                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
3991                skipCall |=
3992                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3993                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
3994                            "MEM", "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
3995                                   "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
3996                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
3997            }
3998        }
3999    }
4000    layer_debug_report_destroy_device(device);
4001    lock.unlock();
4002
4003#if DISPATCH_MAP_DEBUG
4004    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4005#endif
4006    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4007    if (!skipCall) {
4008        pDisp->DestroyDevice(device, pAllocator);
4009    }
4010#else
4011    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4012#endif
4013    delete dev_data->device_dispatch_table;
4014    layer_data_map.erase(key);
4015}
4016
4017static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4018
4019// This validates that the initial layout specified in the command buffer for
4020// the IMAGE is the same
4021// as the global IMAGE layout
4022static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4023    bool skip_call = false;
4024    for (auto cb_image_data : pCB->imageLayoutMap) {
4025        VkImageLayout imageLayout;
4026        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4027            skip_call |=
4028                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4029                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4030                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4031        } else {
4032            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4033                // TODO: Set memory invalid which is in mem_tracker currently
4034            } else if (imageLayout != cb_image_data.second.initialLayout) {
4035                if (cb_image_data.first.hasSubresource) {
4036                    skip_call |= log_msg(
4037                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4038                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4039                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4040                        "with layout %s when first use is %s.",
4041                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4042                                cb_image_data.first.subresource.arrayLayer,
4043                                cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4044                        string_VkImageLayout(cb_image_data.second.initialLayout));
4045                } else {
4046                    skip_call |= log_msg(
4047                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4048                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4049                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4050                        "first use is %s.",
4051                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4052                        string_VkImageLayout(cb_image_data.second.initialLayout));
4053                }
4054            }
4055            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4056        }
4057    }
4058    return skip_call;
4059}
4060
4061// Track which resources are in-flight by atomically incrementing their "in_use" count
4062static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4063    bool skip_call = false;
4064    for (auto drawDataElement : pCB->drawData) {
4065        for (auto buffer : drawDataElement.buffers) {
4066            auto buffer_data = my_data->bufferMap.find(buffer);
4067            if (buffer_data == my_data->bufferMap.end()) {
4068                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4069                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4070                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4071            } else {
4072                buffer_data->second.in_use.fetch_add(1);
4073            }
4074        }
4075    }
4076    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4077        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4078            if (!my_data->setMap.count(set->GetSet())) {
4079                skip_call |=
4080                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4081                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4082                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set));
4083            } else {
4084                set->in_use.fetch_add(1);
4085            }
4086        }
4087    }
4088    for (auto semaphore : pCB->semaphores) {
4089        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4090        if (semaphoreNode == my_data->semaphoreMap.end()) {
4091            skip_call |=
4092                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4093                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4094                        "Cannot submit cmd buffer using deleted semaphore 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(semaphore));
4095        } else {
4096            semaphoreNode->second.in_use.fetch_add(1);
4097        }
4098    }
4099    for (auto event : pCB->events) {
4100        auto eventNode = my_data->eventMap.find(event);
4101        if (eventNode == my_data->eventMap.end()) {
4102            skip_call |=
4103                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4104                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4105                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
4106        } else {
4107            eventNode->second.in_use.fetch_add(1);
4108        }
4109    }
4110    for (auto event : pCB->writeEventsBeforeWait) {
4111        auto eventNode = my_data->eventMap.find(event);
4112        eventNode->second.write_in_use++;
4113    }
4114    return skip_call;
4115}
4116
4117// Note: This function assumes that the global lock is held by the calling
4118// thread.
4119static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4120    bool skip_call = false;
4121    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4122    if (pCB) {
4123        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4124            for (auto event : queryEventsPair.second) {
4125                if (my_data->eventMap[event].needsSignaled) {
4126                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4127                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
4128                                         "Cannot get query results on queryPool 0x%" PRIx64
4129                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4130                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4131                }
4132            }
4133        }
4134    }
4135    return skip_call;
4136}
4137// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4138static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4139    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4140    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4141    pCB->in_use.fetch_sub(1);
4142    if (!pCB->in_use.load()) {
4143        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4144    }
4145}
4146
4147static void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4148    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4149    for (auto drawDataElement : pCB->drawData) {
4150        for (auto buffer : drawDataElement.buffers) {
4151            auto buffer_data = my_data->bufferMap.find(buffer);
4152            if (buffer_data != my_data->bufferMap.end()) {
4153                buffer_data->second.in_use.fetch_sub(1);
4154            }
4155        }
4156    }
4157    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4158        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4159            set->in_use.fetch_sub(1);
4160        }
4161    }
4162    for (auto semaphore : pCB->semaphores) {
4163        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4164        if (semaphoreNode != my_data->semaphoreMap.end()) {
4165            semaphoreNode->second.in_use.fetch_sub(1);
4166        }
4167    }
4168    for (auto event : pCB->events) {
4169        auto eventNode = my_data->eventMap.find(event);
4170        if (eventNode != my_data->eventMap.end()) {
4171            eventNode->second.in_use.fetch_sub(1);
4172        }
4173    }
4174    for (auto event : pCB->writeEventsBeforeWait) {
4175        auto eventNode = my_data->eventMap.find(event);
4176        if (eventNode != my_data->eventMap.end()) {
4177            eventNode->second.write_in_use--;
4178        }
4179    }
4180    for (auto queryStatePair : pCB->queryToStateMap) {
4181        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4182    }
4183    for (auto eventStagePair : pCB->eventToStageMap) {
4184        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4185    }
4186}
4187// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
4188//  decrementResources for all priorFences and cmdBuffers associated with fence.
4189static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4190    bool skip_call = false;
4191    std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs;
4192    for (uint32_t i = 0; i < fenceCount; ++i) {
4193        auto fence_data = my_data->fenceMap.find(pFences[i]);
4194        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4195            return skip_call;
4196        fence_data->second.needsSignaled = false;
4197        if (fence_data->second.in_use.load()) {
4198            fence_pairs.push_back(std::make_pair(fence_data->first, &fence_data->second));
4199            fence_data->second.in_use.fetch_sub(1);
4200        }
4201        decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()),
4202                           fence_data->second.priorFences.data());
4203        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4204            decrementResources(my_data, cmdBuffer);
4205            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
4206            removeInFlightCmdBuffer(my_data, cmdBuffer);
4207        }
4208        fence_data->second.cmdBuffers.clear();
4209        fence_data->second.priorFences.clear();
4210    }
4211    for (auto fence_pair : fence_pairs) {
4212        for (auto queue : fence_pair.second->queues) {
4213            auto queue_pair = my_data->queueMap.find(queue);
4214            if (queue_pair != my_data->queueMap.end()) {
4215                auto last_fence_data =
4216                    std::find(queue_pair->second.lastFences.begin(), queue_pair->second.lastFences.end(), fence_pair.first);
4217                if (last_fence_data != queue_pair->second.lastFences.end())
4218                    queue_pair->second.lastFences.erase(last_fence_data);
4219            }
4220        }
4221        for (auto& fence_data : my_data->fenceMap) {
4222          auto prior_fence_data =
4223              std::find(fence_data.second.priorFences.begin(), fence_data.second.priorFences.end(), fence_pair.first);
4224          if (prior_fence_data != fence_data.second.priorFences.end())
4225              fence_data.second.priorFences.erase(prior_fence_data);
4226        }
4227    }
4228    return skip_call;
4229}
4230// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4231static bool decrementResources(layer_data *my_data, VkQueue queue) {
4232    bool skip_call = false;
4233    auto queue_data = my_data->queueMap.find(queue);
4234    if (queue_data != my_data->queueMap.end()) {
4235        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
4236            decrementResources(my_data, cmdBuffer);
4237            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
4238            removeInFlightCmdBuffer(my_data, cmdBuffer);
4239        }
4240        queue_data->second.untrackedCmdBuffers.clear();
4241        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4242                                        queue_data->second.lastFences.data());
4243    }
4244    return skip_call;
4245}
4246
4247// This function merges command buffer tracking between queues when there is a semaphore dependency
4248// between them (see below for details as to how tracking works). When this happens, the prior
4249// fences from the signaling queue are merged into the wait queue as well as any untracked command
4250// buffers.
4251static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
4252    if (queue == other_queue) {
4253        return;
4254    }
4255    auto queue_data = dev_data->queueMap.find(queue);
4256    auto other_queue_data = dev_data->queueMap.find(other_queue);
4257    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
4258        return;
4259    }
4260    for (auto fenceInner : other_queue_data->second.lastFences) {
4261        queue_data->second.lastFences.push_back(fenceInner);
4262        auto fence_node = dev_data->fenceMap.find(fenceInner);
4263        if (fence_node != dev_data->fenceMap.end()) {
4264            fence_node->second.queues.insert(other_queue_data->first);
4265        }
4266    }
4267    if (fence != VK_NULL_HANDLE) {
4268        auto fence_data = dev_data->fenceMap.find(fence);
4269        if (fence_data == dev_data->fenceMap.end()) {
4270            return;
4271        }
4272        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4273            fence_data->second.cmdBuffers.push_back(cmdbuffer);
4274        }
4275        other_queue_data->second.untrackedCmdBuffers.clear();
4276    } else {
4277        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4278            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
4279        }
4280        other_queue_data->second.untrackedCmdBuffers.clear();
4281    }
4282    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
4283        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
4284    }
4285    for (auto queryStatePair : other_queue_data->second.queryToStateMap) {
4286        queue_data->second.queryToStateMap[queryStatePair.first] = queryStatePair.second;
4287    }
4288}
4289
4290// This is the core function for tracking command buffers. There are two primary ways command
4291// buffers are tracked. When submitted they are stored in the command buffer list associated
4292// with a fence or the untracked command buffer list associated with a queue if no fence is used.
4293// Each queue also stores the last fence that was submitted onto the queue. This allows us to
4294// create a linked list of fences and their associated command buffers so if one fence is
4295// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
4296// waited on (either via a queue, device or fence), we free the cmd buffers for that fence and
4297// recursively call with the prior fences.
4298static void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4299                                VkFence fence) {
4300    auto queue_data = my_data->queueMap.find(queue);
4301    if (fence != VK_NULL_HANDLE) {
4302        vector<VkFence> prior_fences;
4303        auto fence_data = my_data->fenceMap.find(fence);
4304        if (fence_data == my_data->fenceMap.end()) {
4305            return;
4306        }
4307        fence_data->second.cmdBuffers.clear();
4308        if (queue_data != my_data->queueMap.end()) {
4309            prior_fences = queue_data->second.lastFences;
4310            queue_data->second.lastFences.clear();
4311            queue_data->second.lastFences.push_back(fence);
4312            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
4313                fence_data->second.cmdBuffers.push_back(cmdbuffer);
4314            }
4315            queue_data->second.untrackedCmdBuffers.clear();
4316        }
4317        fence_data->second.priorFences = prior_fences;
4318        fence_data->second.needsSignaled = true;
4319        fence_data->second.queues.insert(queue);
4320        fence_data->second.in_use.fetch_add(1);
4321        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4322            const VkSubmitInfo *submit = &pSubmits[submit_idx];
4323            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4324                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4325                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
4326                }
4327                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
4328            }
4329        }
4330    } else {
4331        if (queue_data != my_data->queueMap.end()) {
4332            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4333                const VkSubmitInfo *submit = &pSubmits[submit_idx];
4334                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4335                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4336                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
4337                    }
4338                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
4339                }
4340            }
4341        }
4342    }
4343}
4344
4345static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4346                                       VkFence fence) {
4347    auto queue_data = my_data->queueMap.find(queue);
4348    if (queue_data != my_data->queueMap.end()) {
4349        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4350            const VkSubmitInfo *submit = &pSubmits[submit_idx];
4351            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4352                // Add cmdBuffers to the global set and increment count
4353                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
4354                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4355                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
4356                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
4357                    pSubCB->in_use.fetch_add(1);
4358                }
4359                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
4360                pCB->in_use.fetch_add(1);
4361            }
4362        }
4363    }
4364}
4365
4366static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4367    bool skip_call = false;
4368    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4369        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4370        skip_call |=
4371            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4372                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4373                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4374                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4375    }
4376    return skip_call;
4377}
4378
4379static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4380    bool skipCall = false;
4381    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4382    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4383        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4384                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4385                            "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4386                            "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4387                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4388    }
4389    // Validate that cmd buffers have been updated
4390    if (CB_RECORDED != pCB->state) {
4391        if (CB_INVALID == pCB->state) {
4392            // Inform app of reason CB invalid
4393            bool causeReported = false;
4394            if (!pCB->destroyedSets.empty()) {
4395                std::stringstream set_string;
4396                for (auto set : pCB->destroyedSets)
4397                    set_string << " " << set;
4398
4399                skipCall |=
4400                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4401                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4402                            "You are submitting command buffer 0x%" PRIxLEAST64
4403                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
4404                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
4405                causeReported = true;
4406            }
4407            if (!pCB->updatedSets.empty()) {
4408                std::stringstream set_string;
4409                for (auto set : pCB->updatedSets)
4410                    set_string << " " << set;
4411
4412                skipCall |=
4413                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4414                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4415                            "You are submitting command buffer 0x%" PRIxLEAST64
4416                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
4417                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
4418                causeReported = true;
4419            }
4420            if (!pCB->destroyedFramebuffers.empty()) {
4421                std::stringstream fb_string;
4422                for (auto fb : pCB->destroyedFramebuffers)
4423                    fb_string << " " << fb;
4424
4425                skipCall |=
4426                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4427                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4428                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because it had the following "
4429                            "referenced framebuffers destroyed: %s",
4430                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
4431                causeReported = true;
4432            }
4433            // TODO : This is defensive programming to make sure an error is
4434            //  flagged if we hit this INVALID cmd buffer case and none of the
4435            //  above cases are hit. As the number of INVALID cases grows, this
4436            //  code should be updated to seemlessly handle all the cases.
4437            if (!causeReported) {
4438                skipCall |= log_msg(
4439                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4440                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4441                    "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
4442                    "should "
4443                    "be improved to report the exact cause.",
4444                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
4445            }
4446        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4447            skipCall |=
4448                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4449                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4450                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
4451                        (uint64_t)(pCB->commandBuffer));
4452        }
4453    }
4454    return skipCall;
4455}
4456
4457static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4458    // Track in-use for resources off of primary and any secondary CBs
4459    bool skipCall = validateAndIncrementResources(dev_data, pCB);
4460    if (!pCB->secondaryCommandBuffers.empty()) {
4461        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4462            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
4463            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4464            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4465                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4466                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4467                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4468                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
4469                        " but that buffer has subsequently been bound to "
4470                        "primary cmd buffer 0x%" PRIxLEAST64
4471                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
4472                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
4473                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4474            }
4475        }
4476    }
4477    skipCall |= validateCommandBufferState(dev_data, pCB);
4478    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4479    // on device
4480    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4481    return skipCall;
4482}
4483
4484VKAPI_ATTR VkResult VKAPI_CALL
4485QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4486    bool skipCall = false;
4487    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4488    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4489    std::unique_lock<std::mutex> lock(global_lock);
4490    // First verify that fence is not in use
4491    if (fence != VK_NULL_HANDLE) {
4492        if ((submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
4493            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4494                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4495                                "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
4496        }
4497        if (!dev_data->fenceMap[fence].needsSignaled) {
4498            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4499                                reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4500                                "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
4501                                reinterpret_cast<uint64_t &>(fence));
4502        }
4503    }
4504    // TODO : Review these old print functions and clean up as appropriate
4505    print_mem_list(dev_data);
4506    printCBList(dev_data);
4507    // Update cmdBuffer-related data structs and mark fence in-use
4508    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
4509    // Now verify each individual submit
4510    std::unordered_set<VkQueue> processed_other_queues;
4511    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4512        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4513        vector<VkSemaphore> semaphoreList;
4514        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4515            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
4516            semaphoreList.push_back(semaphore);
4517            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
4518                if (dev_data->semaphoreMap[semaphore].signaled) {
4519                    dev_data->semaphoreMap[semaphore].signaled = false;
4520                } else {
4521                    skipCall |=
4522                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4523                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4524                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
4525                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
4526                }
4527                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
4528                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
4529                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
4530                    processed_other_queues.insert(other_queue);
4531                }
4532            }
4533        }
4534        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4535            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
4536            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
4537                semaphoreList.push_back(semaphore);
4538                if (dev_data->semaphoreMap[semaphore].signaled) {
4539                    skipCall |=
4540                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4541                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4542                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
4543                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4544                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
4545                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
4546                } else {
4547                    dev_data->semaphoreMap[semaphore].signaled = true;
4548                    dev_data->semaphoreMap[semaphore].queue = queue;
4549                }
4550            }
4551        }
4552        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4553            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
4554            skipCall |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
4555            if (pCBNode) {
4556                pCBNode->semaphores = semaphoreList;
4557                pCBNode->submitCount++; // increment submit count
4558                pCBNode->lastSubmittedFence = fence;
4559                pCBNode->lastSubmittedQueue = queue;
4560                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
4561                // Call submit-time functions to validate/update state
4562                for (auto &function : pCBNode->validate_functions) {
4563                    skipCall |= function();
4564                }
4565                for (auto &function : pCBNode->eventUpdates) {
4566                    skipCall |= function(queue);
4567                }
4568                for (auto &function : pCBNode->queryUpdates) {
4569                    skipCall |= function(queue);
4570                }
4571            }
4572        }
4573    }
4574    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
4575    lock.unlock();
4576    if (!skipCall)
4577        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
4578
4579    return result;
4580}
4581
4582VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4583                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4584    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4585    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4586    // TODO : Track allocations and overall size here
4587    std::lock_guard<std::mutex> lock(global_lock);
4588    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
4589    print_mem_list(my_data);
4590    return result;
4591}
4592
4593VKAPI_ATTR void VKAPI_CALL
4594FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4595    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4596
4597    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
4598    // Before freeing a memory object, an application must ensure the memory object is no longer
4599    // in use by the device—for example by command buffers queued for execution. The memory need
4600    // not yet be unbound from all images and buffers, but any further use of those images or
4601    // buffers (on host or device) for anything other than destroying those objects will result in
4602    // undefined behavior.
4603
4604    std::unique_lock<std::mutex> lock(global_lock);
4605    freeMemObjInfo(my_data, device, mem, false);
4606    print_mem_list(my_data);
4607    printCBList(my_data);
4608    lock.unlock();
4609    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
4610}
4611
4612static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4613    bool skipCall = false;
4614
4615    if (size == 0) {
4616        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4617                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4618                           "VkMapMemory: Attempting to map memory range of size zero");
4619    }
4620
4621    auto mem_element = my_data->memObjMap.find(mem);
4622    if (mem_element != my_data->memObjMap.end()) {
4623        // It is an application error to call VkMapMemory on an object that is already mapped
4624        if (mem_element->second.memRange.size != 0) {
4625            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4626                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4627                               "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
4628        }
4629
4630        // Validate that offset + size is within object's allocationSize
4631        if (size == VK_WHOLE_SIZE) {
4632            if (offset >= mem_element->second.allocInfo.allocationSize) {
4633                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4634                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
4635                                   "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64, offset,
4636                                   mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
4637            }
4638        } else {
4639            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
4640                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4641                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
4642                                   "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
4643                                   size + offset, mem_element->second.allocInfo.allocationSize);
4644            }
4645        }
4646    }
4647    return skipCall;
4648}
4649
4650static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4651    auto mem_element = my_data->memObjMap.find(mem);
4652    if (mem_element != my_data->memObjMap.end()) {
4653        MemRange new_range;
4654        new_range.offset = offset;
4655        new_range.size = size;
4656        mem_element->second.memRange = new_range;
4657    }
4658}
4659
4660static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
4661    bool skipCall = false;
4662    auto mem_element = my_data->memObjMap.find(mem);
4663    if (mem_element != my_data->memObjMap.end()) {
4664        if (!mem_element->second.memRange.size) {
4665            // Valid Usage: memory must currently be mapped
4666            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4667                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4668                               "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
4669        }
4670        mem_element->second.memRange.size = 0;
4671        if (mem_element->second.pData) {
4672            free(mem_element->second.pData);
4673            mem_element->second.pData = 0;
4674        }
4675    }
4676    return skipCall;
4677}
4678
4679static char NoncoherentMemoryFillValue = 0xb;
4680
4681static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
4682    auto mem_element = dev_data->memObjMap.find(mem);
4683    if (mem_element != dev_data->memObjMap.end()) {
4684        mem_element->second.pDriverData = *ppData;
4685        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
4686        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4687            mem_element->second.pData = 0;
4688        } else {
4689            if (size == VK_WHOLE_SIZE) {
4690                size = mem_element->second.allocInfo.allocationSize;
4691            }
4692            size_t convSize = (size_t)(size);
4693            mem_element->second.pData = malloc(2 * convSize);
4694            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
4695            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
4696        }
4697    }
4698}
4699// Verify that state for fence being waited on is appropriate. That is,
4700//  a fence being waited on should not already be signalled and
4701//  it should have been submitted on a queue or during acquire next image
4702static inline bool verifyWaitFenceState(VkDevice device, VkFence fence, const char *apiCall) {
4703    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4704    bool skipCall = false;
4705    auto pFenceInfo = my_data->fenceMap.find(fence);
4706    if (pFenceInfo != my_data->fenceMap.end()) {
4707        if (!pFenceInfo->second.firstTimeFlag) {
4708            if (!pFenceInfo->second.needsSignaled) {
4709                skipCall |=
4710                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4711                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4712                            "%s specified fence 0x%" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
4713            }
4714            if (pFenceInfo->second.queues.empty() && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
4715                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4716                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4717                                    "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
4718                                    "acquire next image.",
4719                                    apiCall, reinterpret_cast<uint64_t &>(fence));
4720            }
4721        } else {
4722            pFenceInfo->second.firstTimeFlag = false;
4723        }
4724    }
4725    return skipCall;
4726}
4727
4728VKAPI_ATTR VkResult VKAPI_CALL
4729WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
4730    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4731    bool skip_call = false;
4732    // Verify fence status of submitted fences
4733    std::unique_lock<std::mutex> lock(global_lock);
4734    for (uint32_t i = 0; i < fenceCount; i++) {
4735        skip_call |= verifyWaitFenceState(device, pFences[i], "vkWaitForFences");
4736    }
4737    lock.unlock();
4738    if (skip_call)
4739        return VK_ERROR_VALIDATION_FAILED_EXT;
4740
4741    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
4742
4743    if (result == VK_SUCCESS) {
4744        lock.lock();
4745        // When we know that all fences are complete we can clean/remove their CBs
4746        if (waitAll || fenceCount == 1) {
4747            skip_call |= decrementResources(dev_data, fenceCount, pFences);
4748        }
4749        // NOTE : Alternate case not handled here is when some fences have completed. In
4750        //  this case for app to guarantee which fences completed it will have to call
4751        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
4752        lock.unlock();
4753    }
4754    if (skip_call)
4755        return VK_ERROR_VALIDATION_FAILED_EXT;
4756    return result;
4757}
4758
4759VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
4760    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4761    bool skipCall = false;
4762    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4763    std::unique_lock<std::mutex> lock(global_lock);
4764    skipCall = verifyWaitFenceState(device, fence, "vkGetFenceStatus");
4765    lock.unlock();
4766
4767    if (skipCall)
4768        return result;
4769
4770    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
4771    bool skip_call = false;
4772    lock.lock();
4773    if (result == VK_SUCCESS) {
4774        skipCall |= decrementResources(dev_data, 1, &fence);
4775    }
4776    lock.unlock();
4777    if (skip_call)
4778        return VK_ERROR_VALIDATION_FAILED_EXT;
4779    return result;
4780}
4781
4782VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
4783                                                            VkQueue *pQueue) {
4784    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4785    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
4786    std::lock_guard<std::mutex> lock(global_lock);
4787
4788    // Add queue to tracking set only if it is new
4789    auto result = dev_data->queues.emplace(*pQueue);
4790    if (result.second == true) {
4791        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
4792        pQNode->device = device;
4793    }
4794}
4795
4796VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
4797    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4798    bool skip_call = false;
4799    skip_call |= decrementResources(dev_data, queue);
4800    if (skip_call)
4801        return VK_ERROR_VALIDATION_FAILED_EXT;
4802    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
4803    return result;
4804}
4805
4806VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
4807    bool skip_call = false;
4808    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4809    std::unique_lock<std::mutex> lock(global_lock);
4810    for (auto queue : dev_data->queues) {
4811        skip_call |= decrementResources(dev_data, queue);
4812    }
4813    dev_data->globalInFlightCmdBuffers.clear();
4814    lock.unlock();
4815    if (skip_call)
4816        return VK_ERROR_VALIDATION_FAILED_EXT;
4817    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
4818    return result;
4819}
4820
4821VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
4822    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4823    bool skipCall = false;
4824    std::unique_lock<std::mutex> lock(global_lock);
4825    auto fence_pair = dev_data->fenceMap.find(fence);
4826    if (fence_pair != dev_data->fenceMap.end()) {
4827        if (fence_pair->second.in_use.load()) {
4828            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4829                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4830                                "Fence 0x%" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
4831        }
4832        dev_data->fenceMap.erase(fence_pair);
4833    }
4834    lock.unlock();
4835
4836    if (!skipCall)
4837        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
4838}
4839
4840VKAPI_ATTR void VKAPI_CALL
4841DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
4842    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4843    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
4844    std::lock_guard<std::mutex> lock(global_lock);
4845    auto item = dev_data->semaphoreMap.find(semaphore);
4846    if (item != dev_data->semaphoreMap.end()) {
4847        if (item->second.in_use.load()) {
4848            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4849                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4850                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
4851        }
4852        dev_data->semaphoreMap.erase(semaphore);
4853    }
4854    // TODO : Clean up any internal data structures using this obj.
4855}
4856
4857VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
4858    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4859    bool skip_call = false;
4860    std::unique_lock<std::mutex> lock(global_lock);
4861    auto event_data = dev_data->eventMap.find(event);
4862    if (event_data != dev_data->eventMap.end()) {
4863        if (event_data->second.in_use.load()) {
4864            skip_call |= log_msg(
4865                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4866                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4867                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
4868        }
4869        dev_data->eventMap.erase(event_data);
4870    }
4871    lock.unlock();
4872    if (!skip_call)
4873        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
4874    // TODO : Clean up any internal data structures using this obj.
4875}
4876
4877VKAPI_ATTR void VKAPI_CALL
4878DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
4879    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
4880        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
4881    // TODO : Clean up any internal data structures using this obj.
4882}
4883
4884VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
4885                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
4886                                                   VkQueryResultFlags flags) {
4887    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4888    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
4889    std::unique_lock<std::mutex> lock(global_lock);
4890    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
4891        auto pCB = getCBNode(dev_data, cmdBuffer);
4892        for (auto queryStatePair : pCB->queryToStateMap) {
4893            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
4894        }
4895    }
4896    bool skip_call = false;
4897    for (uint32_t i = 0; i < queryCount; ++i) {
4898        QueryObject query = {queryPool, firstQuery + i};
4899        auto queryElement = queriesInFlight.find(query);
4900        auto queryToStateElement = dev_data->queryToStateMap.find(query);
4901        if (queryToStateElement != dev_data->queryToStateMap.end()) {
4902            // Available and in flight
4903            if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
4904                queryToStateElement->second) {
4905                for (auto cmdBuffer : queryElement->second) {
4906                    auto pCB = getCBNode(dev_data, cmdBuffer);
4907                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
4908                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
4909                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4910                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4911                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
4912                                             (uint64_t)(queryPool), firstQuery + i);
4913                    } else {
4914                        for (auto event : queryEventElement->second) {
4915                            dev_data->eventMap[event].needsSignaled = true;
4916                        }
4917                    }
4918                }
4919                // Unavailable and in flight
4920            } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
4921                       !queryToStateElement->second) {
4922                // TODO : Can there be the same query in use by multiple command buffers in flight?
4923                bool make_available = false;
4924                for (auto cmdBuffer : queryElement->second) {
4925                    auto pCB = getCBNode(dev_data, cmdBuffer);
4926                    make_available |= pCB->queryToStateMap[query];
4927                }
4928                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
4929                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4930                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4931                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
4932                                         (uint64_t)(queryPool), firstQuery + i);
4933                }
4934                // Unavailable
4935            } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
4936                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4937                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4938                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
4939                                     (uint64_t)(queryPool), firstQuery + i);
4940                // Unitialized
4941            } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
4942                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4943                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4944                                     "Cannot get query results on queryPool 0x%" PRIx64
4945                                     " with index %d as data has not been collected for this index.",
4946                                     (uint64_t)(queryPool), firstQuery + i);
4947            }
4948        }
4949    }
4950    lock.unlock();
4951    if (skip_call)
4952        return VK_ERROR_VALIDATION_FAILED_EXT;
4953    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
4954                                                                flags);
4955}
4956
4957static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
4958    bool skip_call = false;
4959    auto buffer_data = my_data->bufferMap.find(buffer);
4960    if (buffer_data == my_data->bufferMap.end()) {
4961        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4962                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
4963                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
4964    } else {
4965        if (buffer_data->second.in_use.load()) {
4966            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4967                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
4968                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
4969        }
4970    }
4971    return skip_call;
4972}
4973
4974static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
4975                                     VkDebugReportObjectTypeEXT object_type) {
4976    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
4977        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
4978                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle,
4979                       other_handle);
4980    } else {
4981        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
4982                       MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle,
4983                       other_handle);
4984    }
4985}
4986
4987static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
4988                                  VkDebugReportObjectTypeEXT object_type) {
4989    bool skip_call = false;
4990
4991    for (auto range : ranges) {
4992        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
4993            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
4994            continue;
4995        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
4996            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
4997            continue;
4998        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
4999    }
5000    return skip_call;
5001}
5002
5003static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5004                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
5005    MEMORY_RANGE range;
5006    range.handle = handle;
5007    range.memory = mem;
5008    range.start = memoryOffset;
5009    range.end = memoryOffset + memRequirements.size - 1;
5010    ranges.push_back(range);
5011    return range;
5012}
5013
5014static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
5015    for (uint32_t item = 0; item < ranges.size(); item++) {
5016        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
5017            ranges.erase(ranges.begin() + item);
5018            break;
5019        }
5020    }
5021}
5022
5023VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5024                                         const VkAllocationCallbacks *pAllocator) {
5025    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5026    bool skipCall = false;
5027    std::unique_lock<std::mutex> lock(global_lock);
5028    if (!validateIdleBuffer(dev_data, buffer) && !skipCall) {
5029        lock.unlock();
5030        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5031        lock.lock();
5032    }
5033    // Clean up memory binding and range information for buffer
5034    const auto &bufferEntry = dev_data->bufferMap.find(buffer);
5035    if (bufferEntry != dev_data->bufferMap.end()) {
5036        const auto &memEntry = dev_data->memObjMap.find(bufferEntry->second.mem);
5037        if (memEntry != dev_data->memObjMap.end()) {
5038            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), bufferEntry->second.mem, memEntry->second.bufferRanges);
5039        }
5040        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5041        dev_data->bufferMap.erase(bufferEntry);
5042    }
5043}
5044
5045VKAPI_ATTR void VKAPI_CALL
5046DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5047    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5048    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5049    std::lock_guard<std::mutex> lock(global_lock);
5050    auto item = dev_data->bufferViewMap.find(bufferView);
5051    if (item != dev_data->bufferViewMap.end()) {
5052        dev_data->bufferViewMap.erase(item);
5053    }
5054}
5055
5056VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5057    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5058    bool skipCall = false;
5059    if (!skipCall) {
5060        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5061    }
5062
5063    std::lock_guard<std::mutex> lock(global_lock);
5064    const auto &imageEntry = dev_data->imageMap.find(image);
5065    if (imageEntry != dev_data->imageMap.end()) {
5066        // Clean up memory mapping, bindings and range references for image
5067        auto memEntry = dev_data->memObjMap.find(imageEntry->second.mem);
5068        if (memEntry != dev_data->memObjMap.end()) {
5069            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.mem, memEntry->second.imageRanges);
5070            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5071            memEntry->second.image = VK_NULL_HANDLE;
5072        }
5073        // Remove image from imageMap
5074        dev_data->imageMap.erase(imageEntry);
5075    }
5076    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5077    if (subEntry != dev_data->imageSubresourceMap.end()) {
5078        for (const auto& pair : subEntry->second) {
5079            dev_data->imageLayoutMap.erase(pair);
5080        }
5081        dev_data->imageSubresourceMap.erase(subEntry);
5082    }
5083}
5084
5085VKAPI_ATTR VkResult VKAPI_CALL
5086BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5087    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5088    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5089    std::unique_lock<std::mutex> lock(global_lock);
5090    // Track objects tied to memory
5091    uint64_t buffer_handle = (uint64_t)(buffer);
5092    bool skipCall =
5093        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5094    auto buffer_node = dev_data->bufferMap.find(buffer);
5095    if (buffer_node != dev_data->bufferMap.end()) {
5096        buffer_node->second.mem = mem;
5097        VkMemoryRequirements memRequirements;
5098        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5099
5100        // Track and validate bound memory range information
5101        const auto &memEntry = dev_data->memObjMap.find(mem);
5102        if (memEntry != dev_data->memObjMap.end()) {
5103            const MEMORY_RANGE range =
5104                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, memEntry->second.bufferRanges);
5105            skipCall |=
5106                validate_memory_range(dev_data, memEntry->second.imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5107        }
5108
5109        // Validate memory requirements alignment
5110        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5111            skipCall |=
5112                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5113                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5114                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5115                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5116                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5117                        memoryOffset, memRequirements.alignment);
5118        }
5119        // Validate device limits alignments
5120        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage;
5121        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
5122            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
5123                skipCall |=
5124                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5125                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5126                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5127                            "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64,
5128                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
5129            }
5130        }
5131        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
5132            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
5133                0) {
5134                skipCall |=
5135                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5136                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
5137                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5138                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
5139                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
5140            }
5141        }
5142        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
5143            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
5144                0) {
5145                skipCall |=
5146                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5147                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
5148                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5149                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
5150                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
5151            }
5152        }
5153    }
5154    print_mem_list(dev_data);
5155    lock.unlock();
5156    if (!skipCall) {
5157        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5158    }
5159    return result;
5160}
5161
5162VKAPI_ATTR void VKAPI_CALL
5163GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5164    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5165    // TODO : What to track here?
5166    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5167    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5168}
5169
5170VKAPI_ATTR void VKAPI_CALL
5171GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5172    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5173    // TODO : What to track here?
5174    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5175    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5176}
5177
5178VKAPI_ATTR void VKAPI_CALL
5179DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5180    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5181        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5182    // TODO : Clean up any internal data structures using this obj.
5183}
5184
5185VKAPI_ATTR void VKAPI_CALL
5186DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5187    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5188
5189    std::unique_lock<std::mutex> lock(global_lock);
5190    my_data->shaderModuleMap.erase(shaderModule);
5191    lock.unlock();
5192
5193    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5194}
5195
5196VKAPI_ATTR void VKAPI_CALL
5197DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5198    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5199    // TODO : Clean up any internal data structures using this obj.
5200}
5201
5202VKAPI_ATTR void VKAPI_CALL
5203DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5204    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5205        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5206    // TODO : Clean up any internal data structures using this obj.
5207}
5208
5209VKAPI_ATTR void VKAPI_CALL
5210DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5211    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5212    // TODO : Clean up any internal data structures using this obj.
5213}
5214
5215VKAPI_ATTR void VKAPI_CALL
5216DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5217    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5218        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5219    // TODO : Clean up any internal data structures using this obj.
5220}
5221
5222VKAPI_ATTR void VKAPI_CALL
5223DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5224    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5225        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5226    // TODO : Clean up any internal data structures using this obj.
5227}
5228// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5229//  If this is a secondary command buffer, then make sure its primary is also in-flight
5230//  If primary is not in-flight, then remove secondary from global in-flight set
5231// This function is only valid at a point when cmdBuffer is being reset or freed
5232static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5233    bool skip_call = false;
5234    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5235        // Primary CB or secondary where primary is also in-flight is an error
5236        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5237            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5238            skip_call |= log_msg(
5239                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5240                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5241                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5242                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5243        } else { // Secondary CB w/o primary in-flight, remove from in-flight
5244            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5245        }
5246    }
5247    return skip_call;
5248}
5249// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5250static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) {
5251    bool skip_call = false;
5252    auto pool_data = dev_data->commandPoolMap.find(commandPool);
5253    if (pool_data != dev_data->commandPoolMap.end()) {
5254        for (auto cmd_buffer : pool_data->second.commandBuffers) {
5255            if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5256                skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5257            }
5258        }
5259    }
5260    return skip_call;
5261}
5262
5263VKAPI_ATTR void VKAPI_CALL
5264FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5265    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5266
5267    bool skip_call = false;
5268    std::unique_lock<std::mutex> lock(global_lock);
5269    for (uint32_t i = 0; i < commandBufferCount; i++) {
5270        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
5271        skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free");
5272        // Delete CB information structure, and remove from commandBufferMap
5273        if (cb_pair != dev_data->commandBufferMap.end()) {
5274            // reset prior to delete for data clean-up
5275            resetCB(dev_data, (*cb_pair).second->commandBuffer);
5276            delete (*cb_pair).second;
5277            dev_data->commandBufferMap.erase(cb_pair);
5278        }
5279
5280        // Remove commandBuffer reference from commandPoolMap
5281        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
5282    }
5283    printCBList(dev_data);
5284    lock.unlock();
5285
5286    if (!skip_call)
5287        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5288}
5289
5290VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5291                                                 const VkAllocationCallbacks *pAllocator,
5292                                                 VkCommandPool *pCommandPool) {
5293    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5294
5295    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5296
5297    if (VK_SUCCESS == result) {
5298        std::lock_guard<std::mutex> lock(global_lock);
5299        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5300        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5301    }
5302    return result;
5303}
5304
5305VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5306                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5307
5308    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5309    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5310    if (result == VK_SUCCESS) {
5311        std::lock_guard<std::mutex> lock(global_lock);
5312        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5313    }
5314    return result;
5315}
5316
5317// Destroy commandPool along with all of the commandBuffers allocated from that pool
5318VKAPI_ATTR void VKAPI_CALL
5319DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5320    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5321    bool skipCall = false;
5322    std::unique_lock<std::mutex> lock(global_lock);
5323    // Verify that command buffers in pool are complete (not in-flight)
5324    VkBool32 result = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with");
5325    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap
5326    auto pool_it = dev_data->commandPoolMap.find(commandPool);
5327    if (pool_it != dev_data->commandPoolMap.end()) {
5328        for (auto cb : pool_it->second.commandBuffers) {
5329            clear_cmd_buf_and_mem_references(dev_data, cb);
5330            auto del_cb = dev_data->commandBufferMap.find(cb);
5331            delete del_cb->second;                  // delete CB info structure
5332            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
5333        }
5334    }
5335    dev_data->commandPoolMap.erase(commandPool);
5336
5337    lock.unlock();
5338
5339    if (result)
5340        return;
5341
5342    if (!skipCall)
5343        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5344}
5345
5346VKAPI_ATTR VkResult VKAPI_CALL
5347ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5348    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5349    bool skipCall = false;
5350    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5351
5352    if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with"))
5353        return VK_ERROR_VALIDATION_FAILED_EXT;
5354
5355    if (!skipCall)
5356        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
5357
5358    // Reset all of the CBs allocated from this pool
5359    if (VK_SUCCESS == result) {
5360        std::lock_guard<std::mutex> lock(global_lock);
5361        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
5362        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
5363            resetCB(dev_data, (*it));
5364            ++it;
5365        }
5366    }
5367    return result;
5368}
5369
5370VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5371    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5372    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5373    bool skipCall = false;
5374    std::unique_lock<std::mutex> lock(global_lock);
5375    for (uint32_t i = 0; i < fenceCount; ++i) {
5376        auto fence_item = dev_data->fenceMap.find(pFences[i]);
5377        if (fence_item != dev_data->fenceMap.end()) {
5378            fence_item->second.needsSignaled = true;
5379            fence_item->second.queues.clear();
5380            fence_item->second.priorFences.clear();
5381            if (fence_item->second.in_use.load()) {
5382                skipCall |=
5383                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5384                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5385                            "Fence 0x%" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
5386            }
5387        }
5388    }
5389    lock.unlock();
5390    if (!skipCall)
5391        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
5392    return result;
5393}
5394
5395VKAPI_ATTR void VKAPI_CALL
5396DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5397    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5398    std::unique_lock<std::mutex> lock(global_lock);
5399    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
5400    if (fbNode != dev_data->frameBufferMap.end()) {
5401        for (auto cb : fbNode->second.referencingCmdBuffers) {
5402            auto cbNode = dev_data->commandBufferMap.find(cb);
5403            if (cbNode != dev_data->commandBufferMap.end()) {
5404                // Set CB as invalid and record destroyed framebuffer
5405                cbNode->second->state = CB_INVALID;
5406                cbNode->second->destroyedFramebuffers.insert(framebuffer);
5407            }
5408        }
5409        delete [] fbNode->second.createInfo.pAttachments;
5410        dev_data->frameBufferMap.erase(fbNode);
5411    }
5412    lock.unlock();
5413    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
5414}
5415
5416VKAPI_ATTR void VKAPI_CALL
5417DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
5418    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5419    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
5420    std::lock_guard<std::mutex> lock(global_lock);
5421    dev_data->renderPassMap.erase(renderPass);
5422}
5423
5424VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5425                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
5426    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5427
5428    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
5429
5430    if (VK_SUCCESS == result) {
5431        std::lock_guard<std::mutex> lock(global_lock);
5432        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
5433        dev_data->bufferMap.insert(std::make_pair(*pBuffer, BUFFER_NODE(pCreateInfo)));
5434    }
5435    return result;
5436}
5437
5438VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
5439                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
5440    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5441    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
5442    if (VK_SUCCESS == result) {
5443        std::lock_guard<std::mutex> lock(global_lock);
5444        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
5445        // In order to create a valid buffer view, the buffer must have been created with at least one of the
5446        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
5447        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
5448                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
5449                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
5450    }
5451    return result;
5452}
5453
5454VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
5455                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
5456    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5457
5458    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
5459
5460    if (VK_SUCCESS == result) {
5461        std::lock_guard<std::mutex> lock(global_lock);
5462        IMAGE_LAYOUT_NODE image_node;
5463        image_node.layout = pCreateInfo->initialLayout;
5464        image_node.format = pCreateInfo->format;
5465        dev_data->imageMap.insert(std::make_pair(*pImage, IMAGE_NODE(pCreateInfo)));
5466        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
5467        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
5468        dev_data->imageLayoutMap[subpair] = image_node;
5469    }
5470    return result;
5471}
5472
5473static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
5474    /* expects global_lock to be held by caller */
5475
5476    auto image_node_it = dev_data->imageMap.find(image);
5477    if (image_node_it != dev_data->imageMap.end()) {
5478        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
5479         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
5480         * the actual values.
5481         */
5482        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
5483            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
5484        }
5485
5486        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
5487            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
5488        }
5489    }
5490}
5491
5492// Return the correct layer/level counts if the caller used the special
5493// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
5494static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
5495                                         VkImage image) {
5496    /* expects global_lock to be held by caller */
5497
5498    *levels = range.levelCount;
5499    *layers = range.layerCount;
5500    auto image_node_it = dev_data->imageMap.find(image);
5501    if (image_node_it != dev_data->imageMap.end()) {
5502        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
5503            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
5504        }
5505        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
5506            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
5507        }
5508    }
5509}
5510
5511VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
5512                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
5513    bool skipCall = false;
5514    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5515    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5516    {
5517        // Validate that img has correct usage flags set
5518        std::lock_guard<std::mutex> lock(global_lock);
5519        skipCall |= validate_image_usage_flags(dev_data, pCreateInfo->image,
5520                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
5521                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5522                false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
5523    }
5524
5525    if (!skipCall) {
5526        result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
5527    }
5528
5529    if (VK_SUCCESS == result) {
5530        std::lock_guard<std::mutex> lock(global_lock);
5531        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
5532        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
5533        dev_data->imageViewMap[*pView] = localCI;
5534    }
5535
5536    return result;
5537}
5538
5539VKAPI_ATTR VkResult VKAPI_CALL
5540CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
5541    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5542    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
5543    if (VK_SUCCESS == result) {
5544        std::lock_guard<std::mutex> lock(global_lock);
5545        auto &fence_node = dev_data->fenceMap[*pFence];
5546        fence_node.createInfo = *pCreateInfo;
5547        fence_node.needsSignaled = true;
5548        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5549            fence_node.firstTimeFlag = true;
5550            fence_node.needsSignaled = false;
5551        }
5552        fence_node.in_use.store(0);
5553    }
5554    return result;
5555}
5556
5557// TODO handle pipeline caches
5558VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
5559                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
5560    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5561    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
5562    return result;
5563}
5564
5565VKAPI_ATTR void VKAPI_CALL
5566DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
5567    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5568    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
5569}
5570
5571VKAPI_ATTR VkResult VKAPI_CALL
5572GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
5573    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5574    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
5575    return result;
5576}
5577
5578VKAPI_ATTR VkResult VKAPI_CALL
5579MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
5580    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5581    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
5582    return result;
5583}
5584
5585// utility function to set collective state for pipeline
5586void set_pipeline_state(PIPELINE_NODE *pPipe) {
5587    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
5588    if (pPipe->graphicsPipelineCI.pColorBlendState) {
5589        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
5590            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
5591                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5592                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5593                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5594                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5595                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5596                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5597                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5598                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
5599                    pPipe->blendConstantsEnabled = true;
5600                }
5601            }
5602        }
5603    }
5604}
5605
5606VKAPI_ATTR VkResult VKAPI_CALL
5607CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5608                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5609                        VkPipeline *pPipelines) {
5610    VkResult result = VK_SUCCESS;
5611    // TODO What to do with pipelineCache?
5612    // The order of operations here is a little convoluted but gets the job done
5613    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
5614    //  2. Create state is then validated (which uses flags setup during shadowing)
5615    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
5616    bool skipCall = false;
5617    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5618    vector<PIPELINE_NODE *> pPipeNode(count);
5619    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5620
5621    uint32_t i = 0;
5622    std::unique_lock<std::mutex> lock(global_lock);
5623
5624    for (i = 0; i < count; i++) {
5625        pPipeNode[i] = new PIPELINE_NODE;
5626        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
5627        pPipeNode[i]->renderPass = getRenderPass(dev_data, pCreateInfos[i].renderPass);
5628        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5629
5630        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
5631    }
5632
5633    if (!skipCall) {
5634        lock.unlock();
5635        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5636                                                                          pPipelines);
5637        lock.lock();
5638        for (i = 0; i < count; i++) {
5639            pPipeNode[i]->pipeline = pPipelines[i];
5640            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5641        }
5642        lock.unlock();
5643    } else {
5644        for (i = 0; i < count; i++) {
5645            delete pPipeNode[i];
5646        }
5647        lock.unlock();
5648        return VK_ERROR_VALIDATION_FAILED_EXT;
5649    }
5650    return result;
5651}
5652
5653VKAPI_ATTR VkResult VKAPI_CALL
5654CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5655                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5656                       VkPipeline *pPipelines) {
5657    VkResult result = VK_SUCCESS;
5658    bool skipCall = false;
5659
5660    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5661    vector<PIPELINE_NODE *> pPipeNode(count);
5662    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5663
5664    uint32_t i = 0;
5665    std::unique_lock<std::mutex> lock(global_lock);
5666    for (i = 0; i < count; i++) {
5667        // TODO: Verify compute stage bits
5668
5669        // Create and initialize internal tracking data structure
5670        pPipeNode[i] = new PIPELINE_NODE;
5671        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
5672        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5673        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
5674
5675        // TODO: Add Compute Pipeline Verification
5676        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
5677    }
5678
5679    if (!skipCall) {
5680        lock.unlock();
5681        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5682                                                                         pPipelines);
5683        lock.lock();
5684        for (i = 0; i < count; i++) {
5685            pPipeNode[i]->pipeline = pPipelines[i];
5686            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5687        }
5688        lock.unlock();
5689    } else {
5690        for (i = 0; i < count; i++) {
5691            // Clean up any locally allocated data structures
5692            delete pPipeNode[i];
5693        }
5694        lock.unlock();
5695        return VK_ERROR_VALIDATION_FAILED_EXT;
5696    }
5697    return result;
5698}
5699
5700VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
5701                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
5702    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5703    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
5704    if (VK_SUCCESS == result) {
5705        std::lock_guard<std::mutex> lock(global_lock);
5706        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
5707    }
5708    return result;
5709}
5710
5711VKAPI_ATTR VkResult VKAPI_CALL
5712CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5713                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
5714    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5715    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5716    if (VK_SUCCESS == result) {
5717        // TODOSC : Capture layout bindings set
5718        std::lock_guard<std::mutex> lock(global_lock);
5719        dev_data->descriptorSetLayoutMap[*pSetLayout] =
5720            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
5721    }
5722    return result;
5723}
5724
5725// Used by CreatePipelineLayout and CmdPushConstants.
5726// Note that the index argument is optional and only used by CreatePipelineLayout.
5727static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5728                                      const char *caller_name, uint32_t index = 0) {
5729    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5730    bool skipCall = false;
5731    // Check that offset + size don't exceed the max.
5732    // Prevent arithetic overflow here by avoiding addition and testing in this order.
5733    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5734        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5735        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5736            skipCall |=
5737                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5738                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
5739                                                              "exceeds this device's maxPushConstantSize of %u.",
5740                        caller_name, index, offset, size, maxPushConstantsSize);
5741        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5742            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5743                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
5744                                                                      "exceeds this device's maxPushConstantSize of %u.",
5745                                caller_name, offset, size, maxPushConstantsSize);
5746        } else {
5747            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5748                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5749        }
5750    }
5751    // size needs to be non-zero and a multiple of 4.
5752    if ((size == 0) || ((size & 0x3) != 0)) {
5753        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5754            skipCall |=
5755                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5756                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5757                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5758                        caller_name, index, size);
5759        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5760            skipCall |=
5761                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5762                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5763                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5764                        caller_name, size);
5765        } else {
5766            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5767                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5768        }
5769    }
5770    // offset needs to be a multiple of 4.
5771    if ((offset & 0x3) != 0) {
5772        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5773            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5774                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5775                                                                      "offset %u. Offset must be a multiple of 4.",
5776                                caller_name, index, offset);
5777        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5778            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5779                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5780                                                                      "offset %u. Offset must be a multiple of 4.",
5781                                caller_name, offset);
5782        } else {
5783            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5784                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5785        }
5786    }
5787    return skipCall;
5788}
5789
5790VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5791                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5792    bool skipCall = false;
5793    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5794    // Push Constant Range checks
5795    uint32_t i = 0;
5796    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5797        skipCall |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5798                                              pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5799        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5800            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5801                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
5802        }
5803    }
5804    // Each range has been validated.  Now check for overlap between ranges (if they are good).
5805    if (!skipCall) {
5806        uint32_t i, j;
5807        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5808            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5809                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
5810                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
5811                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
5812                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
5813                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
5814                    skipCall |=
5815                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5816                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
5817                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
5818                                i, minA, maxA, j, minB, maxB);
5819                }
5820            }
5821        }
5822    }
5823
5824    if (skipCall)
5825        return VK_ERROR_VALIDATION_FAILED_EXT;
5826
5827    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5828    if (VK_SUCCESS == result) {
5829        std::lock_guard<std::mutex> lock(global_lock);
5830        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5831        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
5832        plNode.setLayouts.resize(pCreateInfo->setLayoutCount);
5833        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
5834            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
5835            plNode.setLayouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
5836        }
5837        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
5838        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5839            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
5840        }
5841    }
5842    return result;
5843}
5844
5845VKAPI_ATTR VkResult VKAPI_CALL
5846CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
5847                     VkDescriptorPool *pDescriptorPool) {
5848    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5849    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
5850    if (VK_SUCCESS == result) {
5851        // Insert this pool into Global Pool LL at head
5852        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5853                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
5854                    (uint64_t)*pDescriptorPool))
5855            return VK_ERROR_VALIDATION_FAILED_EXT;
5856        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
5857        if (NULL == pNewNode) {
5858            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5859                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
5860                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
5861                return VK_ERROR_VALIDATION_FAILED_EXT;
5862        } else {
5863            std::lock_guard<std::mutex> lock(global_lock);
5864            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
5865        }
5866    } else {
5867        // Need to do anything if pool create fails?
5868    }
5869    return result;
5870}
5871
5872VKAPI_ATTR VkResult VKAPI_CALL
5873ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
5874    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5875    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
5876    if (VK_SUCCESS == result) {
5877        std::lock_guard<std::mutex> lock(global_lock);
5878        clearDescriptorPool(dev_data, device, descriptorPool, flags);
5879    }
5880    return result;
5881}
5882
5883// Ensure the pool contains enough descriptors and descriptor sets to satisfy
5884// an allocation request. Fills requiredDescriptorsByType with the total number
5885// of descriptors of each type required, for later update.
5886static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
5887                                                  std::vector<cvdescriptorset::DescriptorSetLayout const *> const & layout_nodes,
5888                                                  uint32_t requiredDescriptorsByType[]) {
5889    bool skipCall = false;
5890
5891    // Track number of descriptorSets allowable in this pool
5892    if (pPoolNode->availableSets < count) {
5893        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5894                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
5895                            "Unable to allocate %u descriptorSets from pool 0x%" PRIxLEAST64
5896                            ". This pool only has %d descriptorSets remaining.",
5897                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
5898    }
5899
5900    // Count total descriptors required per type
5901    for (auto layout_node : layout_nodes) {
5902        if (layout_node) {
5903            for (uint32_t j = 0; j < layout_node->GetBindingCount(); ++j) {
5904                const auto &binding_layout = layout_node->GetDescriptorSetLayoutBindingPtrFromIndex(j);
5905                uint32_t typeIndex = static_cast<uint32_t>(binding_layout->descriptorType);
5906                requiredDescriptorsByType[typeIndex] += binding_layout->descriptorCount;
5907            }
5908        }
5909    }
5910
5911    // Determine whether descriptor counts are satisfiable
5912    for (uint32_t i = 0; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; i++) {
5913        if (requiredDescriptorsByType[i] > pPoolNode->availableDescriptorTypeCount[i]) {
5914            skipCall |= log_msg(
5915                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5916                    reinterpret_cast<const uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
5917                    "Unable to allocate %u descriptors of type %s from pool 0x%" PRIxLEAST64
5918                    ". This pool only has %d descriptors of this type remaining.",
5919                    requiredDescriptorsByType[i], string_VkDescriptorType(VkDescriptorType(i)), (uint64_t)pPoolNode->pool,
5920                    pPoolNode->availableDescriptorTypeCount[i]);
5921        }
5922    }
5923
5924    return skipCall;
5925}
5926
5927VKAPI_ATTR VkResult VKAPI_CALL
5928AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
5929    bool skipCall = false;
5930    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5931
5932    uint32_t requiredDescriptorsByType[VK_DESCRIPTOR_TYPE_RANGE_SIZE] {};
5933    std::vector<cvdescriptorset::DescriptorSetLayout const *> layout_nodes(pAllocateInfo->descriptorSetCount, nullptr);
5934
5935    std::unique_lock<std::mutex> lock(global_lock);
5936
5937    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
5938        layout_nodes[i] = getDescriptorSetLayout(dev_data, pAllocateInfo->pSetLayouts[i]);
5939        if (!layout_nodes[i]) {
5940            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
5941                        (uint64_t)pAllocateInfo->pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
5942                        "Unable to find set layout node for layout 0x%" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
5943                        (uint64_t)pAllocateInfo->pSetLayouts[i]);
5944        }
5945    }
5946
5947    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
5948
5949    if (!pPoolNode) {
5950        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
5951                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
5952                            "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
5953                            (uint64_t)pAllocateInfo->descriptorPool);
5954    } else { // Make sure pool has all the available descriptors before calling down chain
5955        skipCall |= PreCallValidateAllocateDescriptorSets(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
5956                                                          layout_nodes, requiredDescriptorsByType);
5957    }
5958    lock.unlock();
5959
5960    if (skipCall)
5961        return VK_ERROR_VALIDATION_FAILED_EXT;
5962
5963    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
5964
5965    if (VK_SUCCESS == result) {
5966        lock.lock();
5967        if (pPoolNode) {
5968            /* Account for sets and descriptors allocated */
5969            pPoolNode->availableSets -= pAllocateInfo->descriptorSetCount;
5970            for (uint32_t i = 0; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; i++) {
5971                pPoolNode->availableDescriptorTypeCount[i] -= requiredDescriptorsByType[i];
5972            }
5973
5974            /* Create tracking object for each descriptor set; insert into
5975             * global map and the pool's set.
5976             */
5977            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
5978                if (layout_nodes[i]) {
5979                    auto pNewNode = new cvdescriptorset::DescriptorSet(
5980                            pDescriptorSets[i], layout_nodes[i], &dev_data->bufferMap, &dev_data->memObjMap, &dev_data->bufferViewMap,
5981                            &dev_data->samplerMap, &dev_data->imageViewMap, &dev_data->imageMap,
5982                            &dev_data->device_extensions.imageToSwapchainMap, &dev_data->device_extensions.swapchainMap);
5983
5984                    pPoolNode->sets.insert(pNewNode);
5985                    pNewNode->in_use.store(0);
5986                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
5987                }
5988            }
5989        }
5990        lock.unlock();
5991    }
5992    return result;
5993}
5994
5995VKAPI_ATTR VkResult VKAPI_CALL
5996FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
5997    bool skipCall = false;
5998    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5999    // Make sure that no sets being destroyed are in-flight
6000    std::unique_lock<std::mutex> lock(global_lock);
6001    for (uint32_t i = 0; i < count; ++i)
6002        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
6003    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6004    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6005        // Can't Free from a NON_FREE pool
6006        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6007                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6008                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6009                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6010    }
6011    lock.unlock();
6012    if (skipCall)
6013        return VK_ERROR_VALIDATION_FAILED_EXT;
6014    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6015    if (VK_SUCCESS == result) {
6016        lock.lock();
6017
6018        // Update available descriptor sets in pool
6019        pPoolNode->availableSets += count;
6020
6021        // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6022        for (uint32_t i = 0; i < count; ++i) {
6023            cvdescriptorset::DescriptorSet *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
6024            uint32_t typeIndex = 0, poolSizeCount = 0;
6025            for (uint32_t j = 0; j < pSet->GetBindingCount(); ++j) {
6026                typeIndex = static_cast<uint32_t>(pSet->GetTypeFromIndex(j));
6027                poolSizeCount = pSet->GetDescriptorCountFromIndex(j);
6028                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
6029            }
6030            freeDescriptorSet(dev_data, pSet);
6031            pPoolNode->sets.erase(pSet);
6032        }
6033        lock.unlock();
6034    }
6035    // TODO : Any other clean-up or book-keeping to do here?
6036    return result;
6037}
6038// TODO : This is a Proof-of-concept for core validation architecture
6039//  Really we'll want to break out these functions to separate files but
6040//  keeping it all together here to prove out design
6041// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6042static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6043                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6044                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6045    // First thing to do is perform map look-ups.
6046    // NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
6047    //  so we can't just do a single map look-up up-front, but do them individually in functions below
6048
6049    // Now make call(s) that validate state, but don't perform state updates in this function
6050    // Note, here DescriptorSets is unique in that we don't yet have an instance. Using a helper function in the
6051    //  namespace which will parse params and make calls into specific class instances
6052    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data->setMap, descriptorWriteCount,
6053                                                         pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6054}
6055// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6056static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6057                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6058                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6059    cvdescriptorset::PerformUpdateDescriptorSets(dev_data->setMap, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6060                                                 pDescriptorCopies);
6061}
6062
6063VKAPI_ATTR void VKAPI_CALL
6064UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6065                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6066    // Only map look-up at top level is for device-level layer_data
6067    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6068    std::unique_lock<std::mutex> lock(global_lock);
6069    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6070                                                         pDescriptorCopies);
6071    lock.unlock();
6072    if (!skip_call) {
6073        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6074                                                              pDescriptorCopies);
6075        lock.lock();
6076        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6077        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6078                                           pDescriptorCopies);
6079    }
6080}
6081
6082VKAPI_ATTR VkResult VKAPI_CALL
6083AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6084    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6085    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6086    if (VK_SUCCESS == result) {
6087        std::unique_lock<std::mutex> lock(global_lock);
6088        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6089        if (cp_it != dev_data->commandPoolMap.end()) {
6090            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6091                // Add command buffer to its commandPool map
6092                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6093                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6094                // Add command buffer to map
6095                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6096                resetCB(dev_data, pCommandBuffer[i]);
6097                pCB->createInfo = *pCreateInfo;
6098                pCB->device = device;
6099            }
6100        }
6101        printCBList(dev_data);
6102        lock.unlock();
6103    }
6104    return result;
6105}
6106
6107VKAPI_ATTR VkResult VKAPI_CALL
6108BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6109    bool skipCall = false;
6110    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6111    std::unique_lock<std::mutex> lock(global_lock);
6112    // Validate command buffer level
6113    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6114    if (pCB) {
6115        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6116        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6117            skipCall |=
6118                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6119                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6120                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6121                        "You must check CB fence before this call.",
6122                        commandBuffer);
6123        }
6124        clear_cmd_buf_and_mem_references(dev_data, pCB);
6125        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6126            // Secondary Command Buffer
6127            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6128            if (!pInfo) {
6129                skipCall |=
6130                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6131                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6132                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6133                            reinterpret_cast<void *>(commandBuffer));
6134            } else {
6135                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6136                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6137                        skipCall |= log_msg(
6138                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6139                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6140                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6141                            reinterpret_cast<void *>(commandBuffer));
6142                    }
6143                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6144                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6145                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6146                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6147                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
6148                                                  "valid framebuffer parameter is specified.",
6149                                            reinterpret_cast<void *>(commandBuffer));
6150                    } else {
6151                        string errorString = "";
6152                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6153                        if (framebuffer) {
6154                            VkRenderPass fbRP = framebuffer->createInfo.renderPass;
6155                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6156                                // renderPass that framebuffer was created with must be compatible with local renderPass
6157                                skipCall |=
6158                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6159                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6160                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6161                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6162                                                  "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6163                                                  "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6164                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6165                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6166                            }
6167                            // Connect this framebuffer to this cmdBuffer
6168                            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
6169                        }
6170                    }
6171                }
6172                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6173                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6174                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6175                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6176                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6177                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6178                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6179                                        "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
6180                                        "support precise occlusion queries.",
6181                                        reinterpret_cast<void *>(commandBuffer));
6182                }
6183            }
6184            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6185                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6186                if (renderPass) {
6187                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
6188                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6189                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6190                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6191                                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must has a subpass index (%d) "
6192                                            "that is less than the number of subpasses (%d).",
6193                                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
6194                    }
6195                }
6196            }
6197        }
6198        if (CB_RECORDING == pCB->state) {
6199            skipCall |=
6200                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6201                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6202                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6203                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6204                        (uint64_t)commandBuffer);
6205        } else if (CB_RECORDED == pCB->state || (CB_INVALID == pCB->state && CMD_END == pCB->cmds.back().type)) {
6206            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6207            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6208                skipCall |=
6209                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6210                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6211                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6212                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6213                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6214                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6215            }
6216            resetCB(dev_data, commandBuffer);
6217        }
6218        // Set updated state here in case implicit reset occurs above
6219        pCB->state = CB_RECORDING;
6220        pCB->beginInfo = *pBeginInfo;
6221        if (pCB->beginInfo.pInheritanceInfo) {
6222            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6223            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6224            // If we are a secondary command-buffer and inheriting.  Update the items we should inherit.
6225            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6226                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6227                pCB->activeRenderPass = getRenderPass(dev_data, pCB->beginInfo.pInheritanceInfo->renderPass);
6228                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
6229                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
6230            }
6231        }
6232    } else {
6233        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6234                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6235                            "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
6236    }
6237    lock.unlock();
6238    if (skipCall) {
6239        return VK_ERROR_VALIDATION_FAILED_EXT;
6240    }
6241    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6242
6243    return result;
6244}
6245
6246VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6247    bool skipCall = false;
6248    VkResult result = VK_SUCCESS;
6249    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6250    std::unique_lock<std::mutex> lock(global_lock);
6251    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6252    if (pCB) {
6253        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6254            // This needs spec clarification to update valid usage, see comments in PR:
6255            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6256            skipCall |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6257        }
6258        skipCall |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6259        for (auto query : pCB->activeQueries) {
6260            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6261                                DRAWSTATE_INVALID_QUERY, "DS",
6262                                "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
6263                                (uint64_t)(query.pool), query.index);
6264        }
6265    }
6266    if (!skipCall) {
6267        lock.unlock();
6268        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6269        lock.lock();
6270        if (VK_SUCCESS == result) {
6271            pCB->state = CB_RECORDED;
6272            // Reset CB status flags
6273            pCB->status = 0;
6274            printCB(dev_data, commandBuffer);
6275        }
6276    } else {
6277        result = VK_ERROR_VALIDATION_FAILED_EXT;
6278    }
6279    lock.unlock();
6280    return result;
6281}
6282
6283VKAPI_ATTR VkResult VKAPI_CALL
6284ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6285    bool skip_call = false;
6286    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6287    std::unique_lock<std::mutex> lock(global_lock);
6288    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6289    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6290    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6291        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6292                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6293                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6294                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6295                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6296    }
6297    skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset");
6298    lock.unlock();
6299    if (skip_call)
6300        return VK_ERROR_VALIDATION_FAILED_EXT;
6301    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6302    if (VK_SUCCESS == result) {
6303        lock.lock();
6304        resetCB(dev_data, commandBuffer);
6305        lock.unlock();
6306    }
6307    return result;
6308}
6309
6310VKAPI_ATTR void VKAPI_CALL
6311CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6312    bool skipCall = false;
6313    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6314    std::unique_lock<std::mutex> lock(global_lock);
6315    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6316    if (pCB) {
6317        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6318        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6319            skipCall |=
6320                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6321                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6322                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6323                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6324        }
6325
6326        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6327        if (pPN) {
6328            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6329            set_cb_pso_status(pCB, pPN);
6330            set_pipeline_state(pPN);
6331        } else {
6332            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6333                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6334                                "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6335        }
6336    }
6337    lock.unlock();
6338    if (!skipCall)
6339        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6340}
6341
6342VKAPI_ATTR void VKAPI_CALL
6343CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6344    bool skipCall = false;
6345    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6346    std::unique_lock<std::mutex> lock(global_lock);
6347    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6348    if (pCB) {
6349        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6350        pCB->status |= CBSTATUS_VIEWPORT_SET;
6351        pCB->viewports.resize(viewportCount);
6352        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6353    }
6354    lock.unlock();
6355    if (!skipCall)
6356        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6357}
6358
6359VKAPI_ATTR void VKAPI_CALL
6360CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6361    bool skipCall = false;
6362    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6363    std::unique_lock<std::mutex> lock(global_lock);
6364    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6365    if (pCB) {
6366        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6367        pCB->status |= CBSTATUS_SCISSOR_SET;
6368        pCB->scissors.resize(scissorCount);
6369        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6370    }
6371    lock.unlock();
6372    if (!skipCall)
6373        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6374}
6375
6376VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6377    bool skip_call = false;
6378    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6379    std::unique_lock<std::mutex> lock(global_lock);
6380    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6381    if (pCB) {
6382        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6383        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6384
6385        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6386        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6387            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6388                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6389                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
6390                                 "flag.  This is undefined behavior and could be ignored.");
6391        } else {
6392            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6393        }
6394    }
6395    lock.unlock();
6396    if (!skip_call)
6397        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6398}
6399
6400VKAPI_ATTR void VKAPI_CALL
6401CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6402    bool skipCall = false;
6403    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6404    std::unique_lock<std::mutex> lock(global_lock);
6405    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6406    if (pCB) {
6407        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6408        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6409    }
6410    lock.unlock();
6411    if (!skipCall)
6412        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6413                                                         depthBiasSlopeFactor);
6414}
6415
6416VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6417    bool skipCall = false;
6418    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6419    std::unique_lock<std::mutex> lock(global_lock);
6420    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6421    if (pCB) {
6422        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6423        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6424    }
6425    lock.unlock();
6426    if (!skipCall)
6427        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6428}
6429
6430VKAPI_ATTR void VKAPI_CALL
6431CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6432    bool skipCall = false;
6433    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6434    std::unique_lock<std::mutex> lock(global_lock);
6435    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6436    if (pCB) {
6437        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6438        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6439    }
6440    lock.unlock();
6441    if (!skipCall)
6442        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6443}
6444
6445VKAPI_ATTR void VKAPI_CALL
6446CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6447    bool skipCall = false;
6448    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6449    std::unique_lock<std::mutex> lock(global_lock);
6450    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6451    if (pCB) {
6452        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6453        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6454    }
6455    lock.unlock();
6456    if (!skipCall)
6457        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6458}
6459
6460VKAPI_ATTR void VKAPI_CALL
6461CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6462    bool skipCall = false;
6463    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6464    std::unique_lock<std::mutex> lock(global_lock);
6465    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6466    if (pCB) {
6467        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6468        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6469    }
6470    lock.unlock();
6471    if (!skipCall)
6472        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6473}
6474
6475VKAPI_ATTR void VKAPI_CALL
6476CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6477    bool skipCall = false;
6478    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6479    std::unique_lock<std::mutex> lock(global_lock);
6480    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6481    if (pCB) {
6482        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6483        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6484    }
6485    lock.unlock();
6486    if (!skipCall)
6487        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6488}
6489
6490VKAPI_ATTR void VKAPI_CALL
6491CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6492                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6493                      const uint32_t *pDynamicOffsets) {
6494    bool skipCall = false;
6495    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6496    std::unique_lock<std::mutex> lock(global_lock);
6497    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6498    if (pCB) {
6499        if (pCB->state == CB_RECORDING) {
6500            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6501            uint32_t totalDynamicDescriptors = 0;
6502            string errorString = "";
6503            uint32_t lastSetIndex = firstSet + setCount - 1;
6504            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6505                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6506                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6507            }
6508            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6509            for (uint32_t i = 0; i < setCount; i++) {
6510                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6511                if (pSet) {
6512                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6513                    pSet->BindCommandBuffer(pCB);
6514                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6515                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6516                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6517                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6518                                        DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
6519                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6520                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6521                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6522                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6523                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6524                                            "DS 0x%" PRIxLEAST64
6525                                            " bound but it was never updated. You may want to either update it or not bind it.",
6526                                            (uint64_t)pDescriptorSets[i]);
6527                    }
6528                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6529                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6530                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6531                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6532                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6533                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6534                                            "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
6535                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
6536                    }
6537
6538                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
6539
6540                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
6541
6542                    if (setDynamicDescriptorCount) {
6543                        // First make sure we won't overstep bounds of pDynamicOffsets array
6544                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
6545                            skipCall |=
6546                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6547                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6548                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6549                                        "descriptorSet #%u (0x%" PRIxLEAST64
6550                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6551                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6552                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
6553                                        (dynamicOffsetCount - totalDynamicDescriptors));
6554                        } else { // Validate and store dynamic offsets with the set
6555                            // Validate Dynamic Offset Minimums
6556                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
6557                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
6558                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6559                                    if (vk_safe_modulo(
6560                                            pDynamicOffsets[cur_dyn_offset],
6561                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6562                                        skipCall |= log_msg(
6563                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6564                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6565                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6566                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6567                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
6568                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6569                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6570                                    }
6571                                    cur_dyn_offset++;
6572                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6573                                    if (vk_safe_modulo(
6574                                            pDynamicOffsets[cur_dyn_offset],
6575                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6576                                        skipCall |= log_msg(
6577                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6578                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6579                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6580                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6581                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
6582                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6583                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6584                                    }
6585                                    cur_dyn_offset++;
6586                                }
6587                            }
6588
6589                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
6590                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
6591                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
6592                            // Keep running total of dynamic descriptor count to verify at the end
6593                            totalDynamicDescriptors += setDynamicDescriptorCount;
6594
6595                        }
6596                    }
6597                } else {
6598                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6599                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6600                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
6601                                        (uint64_t)pDescriptorSets[i]);
6602                }
6603                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6604                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6605                if (firstSet > 0) { // Check set #s below the first bound set
6606                    for (uint32_t i = 0; i < firstSet; ++i) {
6607                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
6608                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
6609                                                             layout, i, errorString)) {
6610                            skipCall |= log_msg(
6611                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6612                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6613                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
6614                                "DescriptorSetDS 0x%" PRIxLEAST64
6615                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6616                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
6617                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
6618                        }
6619                    }
6620                }
6621                // Check if newly last bound set invalidates any remaining bound sets
6622                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
6623                    if (oldFinalBoundSet &&
6624                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
6625                        auto old_set = oldFinalBoundSet->GetSet();
6626                        skipCall |=
6627                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6628                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
6629                                    DRAWSTATE_NONE, "DS", "DescriptorSetDS 0x%" PRIxLEAST64
6630                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
6631                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
6632                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6633                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
6634                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
6635                                    lastSetIndex + 1, (uint64_t)layout);
6636                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6637                    }
6638                }
6639            }
6640            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6641            if (totalDynamicDescriptors != dynamicOffsetCount) {
6642                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6643                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6644                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6645                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
6646                                    "is %u. It should exactly match the number of dynamic descriptors.",
6647                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
6648            }
6649        } else {
6650            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
6651        }
6652    }
6653    lock.unlock();
6654    if (!skipCall)
6655        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6656                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6657}
6658
6659VKAPI_ATTR void VKAPI_CALL
6660CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
6661    bool skipCall = false;
6662    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6663    // TODO : Somewhere need to verify that IBs have correct usage state flagged
6664    std::unique_lock<std::mutex> lock(global_lock);
6665    VkDeviceMemory mem;
6666    skipCall =
6667        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6668    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6669    if (cb_data != dev_data->commandBufferMap.end()) {
6670        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
6671        cb_data->second->validate_functions.push_back(function);
6672        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6673        VkDeviceSize offset_align = 0;
6674        switch (indexType) {
6675        case VK_INDEX_TYPE_UINT16:
6676            offset_align = 2;
6677            break;
6678        case VK_INDEX_TYPE_UINT32:
6679            offset_align = 4;
6680            break;
6681        default:
6682            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
6683            break;
6684        }
6685        if (!offset_align || (offset % offset_align)) {
6686            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6687                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
6688                                "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
6689                                offset, string_VkIndexType(indexType));
6690        }
6691        cb_data->second->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6692    }
6693    lock.unlock();
6694    if (!skipCall)
6695        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6696}
6697
6698void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6699    uint32_t end = firstBinding + bindingCount;
6700    if (pCB->currentDrawData.buffers.size() < end) {
6701        pCB->currentDrawData.buffers.resize(end);
6702    }
6703    for (uint32_t i = 0; i < bindingCount; ++i) {
6704        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6705    }
6706}
6707
6708static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6709
6710VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
6711                                                uint32_t bindingCount, const VkBuffer *pBuffers,
6712                                                const VkDeviceSize *pOffsets) {
6713    bool skipCall = false;
6714    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6715    // TODO : Somewhere need to verify that VBs have correct usage state flagged
6716    std::unique_lock<std::mutex> lock(global_lock);
6717    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6718    if (cb_data != dev_data->commandBufferMap.end()) {
6719        for (uint32_t i = 0; i < bindingCount; ++i) {
6720            VkDeviceMemory mem;
6721            skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6722
6723            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
6724            cb_data->second->validate_functions.push_back(function);
6725        }
6726        addCmd(dev_data, cb_data->second, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffer()");
6727        updateResourceTracking(cb_data->second, firstBinding, bindingCount, pBuffers);
6728    } else {
6729        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffer()");
6730    }
6731    lock.unlock();
6732    if (!skipCall)
6733        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6734}
6735
6736/* expects global_lock to be held by caller */
6737static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6738    bool skip_call = false;
6739
6740    for (auto imageView : pCB->updateImages) {
6741        auto iv_data = dev_data->imageViewMap.find(imageView);
6742        if (iv_data == dev_data->imageViewMap.end())
6743            continue;
6744        VkImage image = iv_data->second.image;
6745        VkDeviceMemory mem;
6746        skip_call |=
6747            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
6748        std::function<bool()> function = [=]() {
6749            set_memory_valid(dev_data, mem, true, image);
6750            return false;
6751        };
6752        pCB->validate_functions.push_back(function);
6753    }
6754    for (auto buffer : pCB->updateBuffers) {
6755        VkDeviceMemory mem;
6756        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
6757                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6758        std::function<bool()> function = [=]() {
6759            set_memory_valid(dev_data, mem, true);
6760            return false;
6761        };
6762        pCB->validate_functions.push_back(function);
6763    }
6764    return skip_call;
6765}
6766
6767VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6768                                   uint32_t firstVertex, uint32_t firstInstance) {
6769    bool skipCall = false;
6770    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6771    std::unique_lock<std::mutex> lock(global_lock);
6772    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6773    if (pCB) {
6774        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
6775        pCB->drawCount[DRAW]++;
6776        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6777        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6778        // TODO : Need to pass commandBuffer as srcObj here
6779        skipCall |=
6780            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6781                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
6782        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6783        if (!skipCall) {
6784            updateResourceTrackingOnDraw(pCB);
6785        }
6786        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
6787    }
6788    lock.unlock();
6789    if (!skipCall)
6790        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6791}
6792
6793VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
6794                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
6795                                                            uint32_t firstInstance) {
6796    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6797    bool skipCall = false;
6798    std::unique_lock<std::mutex> lock(global_lock);
6799    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6800    if (pCB) {
6801        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
6802        pCB->drawCount[DRAW_INDEXED]++;
6803        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6804        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6805        // TODO : Need to pass commandBuffer as srcObj here
6806        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6807                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6808                            "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
6809        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6810        if (!skipCall) {
6811            updateResourceTrackingOnDraw(pCB);
6812        }
6813        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
6814    }
6815    lock.unlock();
6816    if (!skipCall)
6817        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
6818                                                        firstInstance);
6819}
6820
6821VKAPI_ATTR void VKAPI_CALL
6822CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6823    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6824    bool skipCall = false;
6825    std::unique_lock<std::mutex> lock(global_lock);
6826    VkDeviceMemory mem;
6827    // MTMTODO : merge with code below
6828    skipCall =
6829        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6830    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
6831    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6832    if (pCB) {
6833        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
6834        pCB->drawCount[DRAW_INDIRECT]++;
6835        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6836        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6837        // TODO : Need to pass commandBuffer as srcObj here
6838        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6839                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6840                            "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
6841        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6842        if (!skipCall) {
6843            updateResourceTrackingOnDraw(pCB);
6844        }
6845        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
6846    }
6847    lock.unlock();
6848    if (!skipCall)
6849        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6850}
6851
6852VKAPI_ATTR void VKAPI_CALL
6853CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6854    bool skipCall = false;
6855    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6856    std::unique_lock<std::mutex> lock(global_lock);
6857    VkDeviceMemory mem;
6858    // MTMTODO : merge with code below
6859    skipCall =
6860        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6861    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
6862    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6863    if (pCB) {
6864        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
6865        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
6866        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6867        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6868        // TODO : Need to pass commandBuffer as srcObj here
6869        skipCall |=
6870            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6871                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
6872                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
6873        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6874        if (!skipCall) {
6875            updateResourceTrackingOnDraw(pCB);
6876        }
6877        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
6878    }
6879    lock.unlock();
6880    if (!skipCall)
6881        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6882}
6883
6884VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6885    bool skipCall = false;
6886    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6887    std::unique_lock<std::mutex> lock(global_lock);
6888    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6889    if (pCB) {
6890        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6891        // TODO : Call below is temporary until call above can be re-enabled
6892        update_shader_storage_images_and_buffers(dev_data, pCB);
6893        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6894        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
6895        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
6896    }
6897    lock.unlock();
6898    if (!skipCall)
6899        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
6900}
6901
6902VKAPI_ATTR void VKAPI_CALL
6903CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6904    bool skipCall = false;
6905    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6906    std::unique_lock<std::mutex> lock(global_lock);
6907    VkDeviceMemory mem;
6908    skipCall =
6909        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6910    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
6911    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6912    if (pCB) {
6913        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6914        // TODO : Call below is temporary until call above can be re-enabled
6915        update_shader_storage_images_and_buffers(dev_data, pCB);
6916        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6917        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
6918        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
6919    }
6920    lock.unlock();
6921    if (!skipCall)
6922        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
6923}
6924
6925VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6926                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
6927    bool skipCall = false;
6928    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6929    std::unique_lock<std::mutex> lock(global_lock);
6930    VkDeviceMemory src_mem, dst_mem;
6931    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
6932    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBuffer");
6933    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
6934
6935    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBuffer");
6936    // Validate that SRC & DST buffers have correct usage flags set
6937    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
6938                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
6939    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
6940                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6941    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6942    if (cb_data != dev_data->commandBufferMap.end()) {
6943        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); };
6944        cb_data->second->validate_functions.push_back(function);
6945        function = [=]() {
6946            set_memory_valid(dev_data, dst_mem, true);
6947            return false;
6948        };
6949        cb_data->second->validate_functions.push_back(function);
6950
6951        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
6952        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBuffer");
6953    }
6954    lock.unlock();
6955    if (!skipCall)
6956        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
6957}
6958
6959static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
6960                                    VkImageLayout srcImageLayout) {
6961    bool skip_call = false;
6962
6963    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
6964    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
6965    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
6966        uint32_t layer = i + subLayers.baseArrayLayer;
6967        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
6968        IMAGE_CMD_BUF_LAYOUT_NODE node;
6969        if (!FindLayout(pCB, srcImage, sub, node)) {
6970            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
6971            continue;
6972        }
6973        if (node.layout != srcImageLayout) {
6974            // TODO: Improve log message in the next pass
6975            skip_call |=
6976                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6977                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
6978                                                                        "and doesn't match the current layout %s.",
6979                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
6980        }
6981    }
6982    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
6983        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
6984            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
6985            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6986                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
6987                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
6988        } else {
6989            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6990                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
6991                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
6992                                 string_VkImageLayout(srcImageLayout));
6993        }
6994    }
6995    return skip_call;
6996}
6997
6998static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
6999                                  VkImageLayout destImageLayout) {
7000    bool skip_call = false;
7001
7002    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7003    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7004    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7005        uint32_t layer = i + subLayers.baseArrayLayer;
7006        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7007        IMAGE_CMD_BUF_LAYOUT_NODE node;
7008        if (!FindLayout(pCB, destImage, sub, node)) {
7009            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7010            continue;
7011        }
7012        if (node.layout != destImageLayout) {
7013            skip_call |=
7014                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7015                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose dest layout is %s and "
7016                                                                        "doesn't match the current layout %s.",
7017                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7018        }
7019    }
7020    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7021        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7022            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7023            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7024                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7025                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7026        } else {
7027            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7028                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7029                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7030                                 string_VkImageLayout(destImageLayout));
7031        }
7032    }
7033    return skip_call;
7034}
7035
7036VKAPI_ATTR void VKAPI_CALL
7037CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7038             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7039    bool skipCall = false;
7040    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7041    std::unique_lock<std::mutex> lock(global_lock);
7042    VkDeviceMemory src_mem, dst_mem;
7043    // Validate that src & dst images have correct usage flags set
7044    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7045    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImage");
7046
7047    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7048    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImage");
7049    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7050                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7051    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7052                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7053    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7054    if (cb_data != dev_data->commandBufferMap.end()) {
7055        std::function<bool()> function = [=]() {
7056            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImage()", srcImage);
7057        };
7058        cb_data->second->validate_functions.push_back(function);
7059        function = [=]() {
7060            set_memory_valid(dev_data, dst_mem, true, dstImage);
7061            return false;
7062        };
7063        cb_data->second->validate_functions.push_back(function);
7064
7065        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGE, "vkCmdCopyImage()");
7066        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImage");
7067        for (uint32_t i = 0; i < regionCount; ++i) {
7068            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7069            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7070        }
7071    }
7072    lock.unlock();
7073    if (!skipCall)
7074        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7075                                                      regionCount, pRegions);
7076}
7077
7078VKAPI_ATTR void VKAPI_CALL
7079CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7080             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7081    bool skipCall = false;
7082    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7083    std::unique_lock<std::mutex> lock(global_lock);
7084    VkDeviceMemory src_mem, dst_mem;
7085    // Validate that src & dst images have correct usage flags set
7086    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7087    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdBlitImage");
7088
7089    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7090    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdBlitImage");
7091    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7092                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7093    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7094                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7095
7096    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7097    if (cb_data != dev_data->commandBufferMap.end()) {
7098        std::function<bool()> function = [=]() {
7099            return validate_memory_is_valid(dev_data, src_mem, "vkCmdBlitImage()", srcImage);
7100        };
7101        cb_data->second->validate_functions.push_back(function);
7102        function = [=]() {
7103            set_memory_valid(dev_data, dst_mem, true, dstImage);
7104            return false;
7105        };
7106        cb_data->second->validate_functions.push_back(function);
7107
7108        skipCall |= addCmd(dev_data, cb_data->second, CMD_BLITIMAGE, "vkCmdBlitImage()");
7109        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdBlitImage");
7110    }
7111    lock.unlock();
7112    if (!skipCall)
7113        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7114                                                      regionCount, pRegions, filter);
7115}
7116
7117VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7118                                                VkImage dstImage, VkImageLayout dstImageLayout,
7119                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7120    bool skipCall = false;
7121    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7122    std::unique_lock<std::mutex> lock(global_lock);
7123    VkDeviceMemory dst_mem, src_mem;
7124    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7125    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBufferToImage");
7126
7127    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
7128    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBufferToImage");
7129    // Validate that src buff & dst image have correct usage flags set
7130    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()",
7131                                            "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7132    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()",
7133                                           "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7134    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7135    if (cb_data != dev_data->commandBufferMap.end()) {
7136        std::function<bool()> function = [=]() {
7137            set_memory_valid(dev_data, dst_mem, true, dstImage);
7138            return false;
7139        };
7140        cb_data->second->validate_functions.push_back(function);
7141        function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBufferToImage()"); };
7142        cb_data->second->validate_functions.push_back(function);
7143
7144        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7145        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBufferToImage");
7146        for (uint32_t i = 0; i < regionCount; ++i) {
7147            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7148        }
7149    }
7150    lock.unlock();
7151    if (!skipCall)
7152        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7153                                                              pRegions);
7154}
7155
7156VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7157                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7158                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7159    bool skipCall = false;
7160    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7161    std::unique_lock<std::mutex> lock(global_lock);
7162    VkDeviceMemory src_mem, dst_mem;
7163    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7164    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImageToBuffer");
7165
7166    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
7167    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImageToBuffer");
7168    // Validate that dst buff & src image have correct usage flags set
7169    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()",
7170                                           "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7171    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()",
7172                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7173
7174    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7175    if (cb_data != dev_data->commandBufferMap.end()) {
7176        std::function<bool()> function = [=]() {
7177            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImageToBuffer()", srcImage);
7178        };
7179        cb_data->second->validate_functions.push_back(function);
7180        function = [=]() {
7181            set_memory_valid(dev_data, dst_mem, true);
7182            return false;
7183        };
7184        cb_data->second->validate_functions.push_back(function);
7185
7186        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7187        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImageToBuffer");
7188        for (uint32_t i = 0; i < regionCount; ++i) {
7189            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7190        }
7191    }
7192    lock.unlock();
7193    if (!skipCall)
7194        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7195                                                              pRegions);
7196}
7197
7198VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7199                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7200    bool skipCall = false;
7201    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7202    std::unique_lock<std::mutex> lock(global_lock);
7203    VkDeviceMemory mem;
7204    skipCall =
7205        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7206    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7207    // Validate that dst buff has correct usage flags set
7208    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()",
7209                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7210
7211    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7212    if (cb_data != dev_data->commandBufferMap.end()) {
7213        std::function<bool()> function = [=]() {
7214            set_memory_valid(dev_data, mem, true);
7215            return false;
7216        };
7217        cb_data->second->validate_functions.push_back(function);
7218
7219        skipCall |= addCmd(dev_data, cb_data->second, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7220        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyUpdateBuffer");
7221    }
7222    lock.unlock();
7223    if (!skipCall)
7224        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7225}
7226
7227VKAPI_ATTR void VKAPI_CALL
7228CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7229    bool skipCall = false;
7230    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7231    std::unique_lock<std::mutex> lock(global_lock);
7232    VkDeviceMemory mem;
7233    skipCall =
7234        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7235    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7236    // Validate that dst buff has correct usage flags set
7237    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
7238                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7239
7240    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7241    if (cb_data != dev_data->commandBufferMap.end()) {
7242        std::function<bool()> function = [=]() {
7243            set_memory_valid(dev_data, mem, true);
7244            return false;
7245        };
7246        cb_data->second->validate_functions.push_back(function);
7247
7248        skipCall |= addCmd(dev_data, cb_data->second, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7249        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyFillBuffer");
7250    }
7251    lock.unlock();
7252    if (!skipCall)
7253        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7254}
7255
7256VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7257                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7258                                               const VkClearRect *pRects) {
7259    bool skipCall = false;
7260    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7261    std::unique_lock<std::mutex> lock(global_lock);
7262    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7263    if (pCB) {
7264        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7265        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7266        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7267            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7268            // TODO : commandBuffer should be srcObj
7269            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
7270            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
7271            // call CmdClearAttachments
7272            // Otherwise this seems more like a performance warning.
7273            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7274                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7275                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7276                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7277                                (uint64_t)(commandBuffer));
7278        }
7279        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7280    }
7281
7282    // Validate that attachment is in reference list of active subpass
7283    if (pCB->activeRenderPass) {
7284        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
7285        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7286
7287        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7288            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7289            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7290                bool found = false;
7291                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7292                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7293                        found = true;
7294                        break;
7295                    }
7296                }
7297                if (!found) {
7298                    skipCall |= log_msg(
7299                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7300                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7301                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7302                        attachment->colorAttachment, pCB->activeSubpass);
7303                }
7304            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
7305                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
7306                    (pSD->pDepthStencilAttachment->attachment ==
7307                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
7308
7309                    skipCall |= log_msg(
7310                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7311                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7312                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7313                        "in active subpass %d",
7314                        attachment->colorAttachment,
7315                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7316                        pCB->activeSubpass);
7317                }
7318            }
7319        }
7320    }
7321    lock.unlock();
7322    if (!skipCall)
7323        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7324}
7325
7326VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7327                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
7328                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7329    bool skipCall = false;
7330    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7331    std::unique_lock<std::mutex> lock(global_lock);
7332    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7333    VkDeviceMemory mem;
7334    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7335    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7336    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7337    if (cb_data != dev_data->commandBufferMap.end()) {
7338        std::function<bool()> function = [=]() {
7339            set_memory_valid(dev_data, mem, true, image);
7340            return false;
7341        };
7342        cb_data->second->validate_functions.push_back(function);
7343
7344        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7345        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearColorImage");
7346    }
7347    lock.unlock();
7348    if (!skipCall)
7349        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7350}
7351
7352VKAPI_ATTR void VKAPI_CALL
7353CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7354                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7355                          const VkImageSubresourceRange *pRanges) {
7356    bool skipCall = false;
7357    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7358    std::unique_lock<std::mutex> lock(global_lock);
7359    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7360    VkDeviceMemory mem;
7361    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7362    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7363    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7364    if (cb_data != dev_data->commandBufferMap.end()) {
7365        std::function<bool()> function = [=]() {
7366            set_memory_valid(dev_data, mem, true, image);
7367            return false;
7368        };
7369        cb_data->second->validate_functions.push_back(function);
7370
7371        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7372        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearDepthStencilImage");
7373    }
7374    lock.unlock();
7375    if (!skipCall)
7376        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7377                                                                   pRanges);
7378}
7379
7380VKAPI_ATTR void VKAPI_CALL
7381CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7382                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7383    bool skipCall = false;
7384    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7385    std::unique_lock<std::mutex> lock(global_lock);
7386    VkDeviceMemory src_mem, dst_mem;
7387    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7388    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdResolveImage");
7389
7390    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7391    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdResolveImage");
7392    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7393    if (cb_data != dev_data->commandBufferMap.end()) {
7394        std::function<bool()> function = [=]() {
7395            return validate_memory_is_valid(dev_data, src_mem, "vkCmdResolveImage()", srcImage);
7396        };
7397        cb_data->second->validate_functions.push_back(function);
7398        function = [=]() {
7399            set_memory_valid(dev_data, dst_mem, true, dstImage);
7400            return false;
7401        };
7402        cb_data->second->validate_functions.push_back(function);
7403
7404        skipCall |= addCmd(dev_data, cb_data->second, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7405        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdResolveImage");
7406    }
7407    lock.unlock();
7408    if (!skipCall)
7409        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7410                                                         regionCount, pRegions);
7411}
7412
7413bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7414    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7415    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7416    if (pCB) {
7417        pCB->eventToStageMap[event] = stageMask;
7418    }
7419    auto queue_data = dev_data->queueMap.find(queue);
7420    if (queue_data != dev_data->queueMap.end()) {
7421        queue_data->second.eventToStageMap[event] = stageMask;
7422    }
7423    return false;
7424}
7425
7426VKAPI_ATTR void VKAPI_CALL
7427CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7428    bool skipCall = false;
7429    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7430    std::unique_lock<std::mutex> lock(global_lock);
7431    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7432    if (pCB) {
7433        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7434        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7435        pCB->events.push_back(event);
7436        if (!pCB->waitedEvents.count(event)) {
7437            pCB->writeEventsBeforeWait.push_back(event);
7438        }
7439        std::function<bool(VkQueue)> eventUpdate =
7440            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7441        pCB->eventUpdates.push_back(eventUpdate);
7442    }
7443    lock.unlock();
7444    if (!skipCall)
7445        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7446}
7447
7448VKAPI_ATTR void VKAPI_CALL
7449CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7450    bool skipCall = false;
7451    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7452    std::unique_lock<std::mutex> lock(global_lock);
7453    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7454    if (pCB) {
7455        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7456        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
7457        pCB->events.push_back(event);
7458        if (!pCB->waitedEvents.count(event)) {
7459            pCB->writeEventsBeforeWait.push_back(event);
7460        }
7461        std::function<bool(VkQueue)> eventUpdate =
7462            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7463        pCB->eventUpdates.push_back(eventUpdate);
7464    }
7465    lock.unlock();
7466    if (!skipCall)
7467        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
7468}
7469
7470static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7471                                   const VkImageMemoryBarrier *pImgMemBarriers) {
7472    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7473    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7474    bool skip = false;
7475    uint32_t levelCount = 0;
7476    uint32_t layerCount = 0;
7477
7478    for (uint32_t i = 0; i < memBarrierCount; ++i) {
7479        auto mem_barrier = &pImgMemBarriers[i];
7480        if (!mem_barrier)
7481            continue;
7482        // TODO: Do not iterate over every possibility - consolidate where
7483        // possible
7484        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
7485
7486        for (uint32_t j = 0; j < levelCount; j++) {
7487            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
7488            for (uint32_t k = 0; k < layerCount; k++) {
7489                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
7490                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
7491                IMAGE_CMD_BUF_LAYOUT_NODE node;
7492                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
7493                    SetLayout(pCB, mem_barrier->image, sub,
7494                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
7495                    continue;
7496                }
7497                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7498                    // TODO: Set memory invalid which is in mem_tracker currently
7499                } else if (node.layout != mem_barrier->oldLayout) {
7500                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7501                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
7502                                                                                    "when current layout is %s.",
7503                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
7504                }
7505                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
7506            }
7507        }
7508    }
7509    return skip;
7510}
7511
7512// Print readable FlagBits in FlagMask
7513static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
7514    std::string result;
7515    std::string separator;
7516
7517    if (accessMask == 0) {
7518        result = "[None]";
7519    } else {
7520        result = "[";
7521        for (auto i = 0; i < 32; i++) {
7522            if (accessMask & (1 << i)) {
7523                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
7524                separator = " | ";
7525            }
7526        }
7527        result = result + "]";
7528    }
7529    return result;
7530}
7531
7532// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
7533// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
7534// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
7535static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7536                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
7537                             const char *type) {
7538    bool skip_call = false;
7539
7540    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
7541        if (accessMask & ~(required_bit | optional_bits)) {
7542            // TODO: Verify against Valid Use
7543            skip_call |=
7544                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7545                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7546                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7547        }
7548    } else {
7549        if (!required_bit) {
7550            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7551                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
7552                                                                  "%s when layout is %s, unless the app has previously added a "
7553                                                                  "barrier for this transition.",
7554                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
7555                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
7556        } else {
7557            std::string opt_bits;
7558            if (optional_bits != 0) {
7559                std::stringstream ss;
7560                ss << optional_bits;
7561                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
7562            }
7563            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7564                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
7565                                                                  "layout is %s, unless the app has previously added a barrier for "
7566                                                                  "this transition.",
7567                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
7568                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
7569        }
7570    }
7571    return skip_call;
7572}
7573
7574static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7575                                        const VkImageLayout &layout, const char *type) {
7576    bool skip_call = false;
7577    switch (layout) {
7578    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
7579        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
7580                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
7581        break;
7582    }
7583    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
7584        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
7585                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
7586        break;
7587    }
7588    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
7589        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
7590        break;
7591    }
7592    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
7593        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
7594        break;
7595    }
7596    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
7597        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7598                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7599        break;
7600    }
7601    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
7602        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7603                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7604        break;
7605    }
7606    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
7607        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
7608        break;
7609    }
7610    case VK_IMAGE_LAYOUT_UNDEFINED: {
7611        if (accessMask != 0) {
7612            // TODO: Verify against Valid Use section spec
7613            skip_call |=
7614                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7615                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7616                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7617        }
7618        break;
7619    }
7620    case VK_IMAGE_LAYOUT_GENERAL:
7621    default: { break; }
7622    }
7623    return skip_call;
7624}
7625
7626static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7627                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7628                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7629                             const VkImageMemoryBarrier *pImageMemBarriers) {
7630    bool skip_call = false;
7631    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7632    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7633    if (pCB->activeRenderPass && memBarrierCount) {
7634        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7635            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7636                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
7637                                                                  "with no self dependency specified.",
7638                                 funcName, pCB->activeSubpass);
7639        }
7640    }
7641    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7642        auto mem_barrier = &pImageMemBarriers[i];
7643        auto image_data = dev_data->imageMap.find(mem_barrier->image);
7644        if (image_data != dev_data->imageMap.end()) {
7645            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7646            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7647            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7648                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7649                // be VK_QUEUE_FAMILY_IGNORED
7650                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7651                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7652                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7653                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
7654                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
7655                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7656                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7657                }
7658            } else {
7659                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7660                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7661                // or both be a valid queue family
7662                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7663                    (src_q_f_index != dst_q_f_index)) {
7664                    skip_call |=
7665                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7666                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
7667                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7668                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7669                                                                     "must be.",
7670                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7671                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7672                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7673                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7674                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7675                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7676                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
7677                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7678                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
7679                                         "queueFamilies crated for this device.",
7680                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
7681                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
7682                }
7683            }
7684        }
7685
7686        if (mem_barrier) {
7687            skip_call |=
7688                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7689            skip_call |=
7690                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
7691            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7692                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7693                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
7694                                                         "PREINITIALIZED.",
7695                        funcName);
7696            }
7697            auto image_data = dev_data->imageMap.find(mem_barrier->image);
7698            VkFormat format = VK_FORMAT_UNDEFINED;
7699            uint32_t arrayLayers = 0, mipLevels = 0;
7700            bool imageFound = false;
7701            if (image_data != dev_data->imageMap.end()) {
7702                format = image_data->second.createInfo.format;
7703                arrayLayers = image_data->second.createInfo.arrayLayers;
7704                mipLevels = image_data->second.createInfo.mipLevels;
7705                imageFound = true;
7706            } else if (dev_data->device_extensions.wsi_enabled) {
7707                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
7708                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
7709                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
7710                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
7711                        format = swapchain_data->second->createInfo.imageFormat;
7712                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
7713                        mipLevels = 1;
7714                        imageFound = true;
7715                    }
7716                }
7717            }
7718            if (imageFound) {
7719                if (vk_format_is_depth_and_stencil(format) &&
7720                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
7721                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
7722                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7723                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
7724                                                             "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
7725                                                             "VK_IMAGE_ASPECT_STENCIL_BIT set.",
7726                            funcName);
7727                }
7728                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
7729                                     ? 1
7730                                     : mem_barrier->subresourceRange.layerCount;
7731                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
7732                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7733                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
7734                                                             "baseArrayLayer (%d) and layerCount (%d) be less "
7735                                                             "than or equal to the total number of layers (%d).",
7736                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
7737                            arrayLayers);
7738                }
7739                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
7740                                     ? 1
7741                                     : mem_barrier->subresourceRange.levelCount;
7742                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
7743                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7744                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
7745                                                             "(%d) and levelCount (%d) be less than or equal to "
7746                                                             "the total number of levels (%d).",
7747                            funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
7748                            mipLevels);
7749                }
7750            }
7751        }
7752    }
7753    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7754        auto mem_barrier = &pBufferMemBarriers[i];
7755        if (pCB->activeRenderPass) {
7756            skip_call |=
7757                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7758                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7759        }
7760        if (!mem_barrier)
7761            continue;
7762
7763        // Validate buffer barrier queue family indices
7764        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7765             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7766            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7767             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7768            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7769                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7770                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
7771                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7772                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7773                                 dev_data->phys_dev_properties.queue_family_properties.size());
7774        }
7775
7776        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
7777        if (buffer_data != dev_data->bufferMap.end()) {
7778            VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
7779                                           ? buffer_data->second.createInfo.size
7780                                           : 0;
7781            if (mem_barrier->offset >= buffer_size) {
7782                skip_call |= log_msg(
7783                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7784                    DRAWSTATE_INVALID_BARRIER, "DS",
7785                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7786                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7787                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
7788            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7789                skip_call |= log_msg(
7790                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7791                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7792                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
7793                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7794                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
7795                    reinterpret_cast<const uint64_t &>(buffer_size));
7796            }
7797        }
7798    }
7799    return skip_call;
7800}
7801
7802bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
7803    bool skip_call = false;
7804    VkPipelineStageFlags stageMask = 0;
7805    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
7806    for (uint32_t i = 0; i < eventCount; ++i) {
7807        auto event = pCB->events[firstEventIndex + i];
7808        auto queue_data = dev_data->queueMap.find(queue);
7809        if (queue_data == dev_data->queueMap.end())
7810            return false;
7811        auto event_data = queue_data->second.eventToStageMap.find(event);
7812        if (event_data != queue_data->second.eventToStageMap.end()) {
7813            stageMask |= event_data->second;
7814        } else {
7815            auto global_event_data = dev_data->eventMap.find(event);
7816            if (global_event_data == dev_data->eventMap.end()) {
7817                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7818                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
7819                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
7820                                     reinterpret_cast<const uint64_t &>(event));
7821            } else {
7822                stageMask |= global_event_data->second.stageMask;
7823            }
7824        }
7825    }
7826    // TODO: Need to validate that host_bit is only set if set event is called
7827    // but set event can be called at any time.
7828    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7829        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7830                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to VkCmdWaitEvents "
7831                                                            "using srcStageMask 0x%x which must be the bitwise "
7832                                                            "OR of the stageMask parameters used in calls to "
7833                                                            "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
7834                                                            "used with vkSetEvent but instead is 0x%x.",
7835                             sourceStageMask, stageMask);
7836    }
7837    return skip_call;
7838}
7839
7840VKAPI_ATTR void VKAPI_CALL
7841CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
7842              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7843              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7844              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7845    bool skipCall = false;
7846    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7847    std::unique_lock<std::mutex> lock(global_lock);
7848    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7849    if (pCB) {
7850        auto firstEventIndex = pCB->events.size();
7851        for (uint32_t i = 0; i < eventCount; ++i) {
7852            pCB->waitedEvents.insert(pEvents[i]);
7853            pCB->events.push_back(pEvents[i]);
7854        }
7855        std::function<bool(VkQueue)> eventUpdate =
7856            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
7857        pCB->eventUpdates.push_back(eventUpdate);
7858        if (pCB->state == CB_RECORDING) {
7859            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7860        } else {
7861            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
7862        }
7863        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7864        skipCall |=
7865            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7866                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7867    }
7868    lock.unlock();
7869    if (!skipCall)
7870        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7871                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7872                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7873}
7874
7875VKAPI_ATTR void VKAPI_CALL
7876CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
7877                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7878                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7879                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7880    bool skipCall = false;
7881    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7882    std::unique_lock<std::mutex> lock(global_lock);
7883    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7884    if (pCB) {
7885        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
7886        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7887        skipCall |=
7888            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7889                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7890    }
7891    lock.unlock();
7892    if (!skipCall)
7893        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7894                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7895                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7896}
7897
7898bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
7899    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7900    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7901    if (pCB) {
7902        pCB->queryToStateMap[object] = value;
7903    }
7904    auto queue_data = dev_data->queueMap.find(queue);
7905    if (queue_data != dev_data->queueMap.end()) {
7906        queue_data->second.queryToStateMap[object] = value;
7907    }
7908    return false;
7909}
7910
7911VKAPI_ATTR void VKAPI_CALL
7912CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
7913    bool skipCall = false;
7914    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7915    std::unique_lock<std::mutex> lock(global_lock);
7916    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7917    if (pCB) {
7918        QueryObject query = {queryPool, slot};
7919        pCB->activeQueries.insert(query);
7920        if (!pCB->startedQueries.count(query)) {
7921            pCB->startedQueries.insert(query);
7922        }
7923        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
7924    }
7925    lock.unlock();
7926    if (!skipCall)
7927        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
7928}
7929
7930VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
7931    bool skipCall = false;
7932    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7933    std::unique_lock<std::mutex> lock(global_lock);
7934    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7935    if (pCB) {
7936        QueryObject query = {queryPool, slot};
7937        if (!pCB->activeQueries.count(query)) {
7938            skipCall |=
7939                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7940                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
7941                        (uint64_t)(queryPool), slot);
7942        } else {
7943            pCB->activeQueries.erase(query);
7944        }
7945        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
7946        pCB->queryUpdates.push_back(queryUpdate);
7947        if (pCB->state == CB_RECORDING) {
7948            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "VkCmdEndQuery()");
7949        } else {
7950            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
7951        }
7952    }
7953    lock.unlock();
7954    if (!skipCall)
7955        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
7956}
7957
7958VKAPI_ATTR void VKAPI_CALL
7959CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
7960    bool skipCall = false;
7961    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7962    std::unique_lock<std::mutex> lock(global_lock);
7963    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7964    if (pCB) {
7965        for (uint32_t i = 0; i < queryCount; i++) {
7966            QueryObject query = {queryPool, firstQuery + i};
7967            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
7968            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
7969            pCB->queryUpdates.push_back(queryUpdate);
7970        }
7971        if (pCB->state == CB_RECORDING) {
7972            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
7973        } else {
7974            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
7975        }
7976        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdQueryPool");
7977    }
7978    lock.unlock();
7979    if (!skipCall)
7980        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
7981}
7982
7983bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
7984    bool skip_call = false;
7985    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
7986    auto queue_data = dev_data->queueMap.find(queue);
7987    if (queue_data == dev_data->queueMap.end())
7988        return false;
7989    for (uint32_t i = 0; i < queryCount; i++) {
7990        QueryObject query = {queryPool, firstQuery + i};
7991        auto query_data = queue_data->second.queryToStateMap.find(query);
7992        bool fail = false;
7993        if (query_data != queue_data->second.queryToStateMap.end()) {
7994            if (!query_data->second) {
7995                fail = true;
7996            }
7997        } else {
7998            auto global_query_data = dev_data->queryToStateMap.find(query);
7999            if (global_query_data != dev_data->queryToStateMap.end()) {
8000                if (!global_query_data->second) {
8001                    fail = true;
8002                }
8003            } else {
8004                fail = true;
8005            }
8006        }
8007        if (fail) {
8008            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8009                                 DRAWSTATE_INVALID_QUERY, "DS",
8010                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8011                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8012        }
8013    }
8014    return skip_call;
8015}
8016
8017VKAPI_ATTR void VKAPI_CALL
8018CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8019                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8020    bool skipCall = false;
8021    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8022    std::unique_lock<std::mutex> lock(global_lock);
8023    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8024#if MTMERGESOURCE
8025    VkDeviceMemory mem;
8026    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8027    skipCall |=
8028        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8029    if (cb_data != dev_data->commandBufferMap.end()) {
8030        std::function<bool()> function = [=]() {
8031            set_memory_valid(dev_data, mem, true);
8032            return false;
8033        };
8034        cb_data->second->validate_functions.push_back(function);
8035    }
8036    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8037    // Validate that DST buffer has correct usage flags set
8038    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8039                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8040#endif
8041    if (pCB) {
8042        std::function<bool(VkQueue)> queryUpdate =
8043            std::bind(validateQuery, std::placeholders::_1, pCB, queryPool, queryCount, firstQuery);
8044        pCB->queryUpdates.push_back(queryUpdate);
8045        if (pCB->state == CB_RECORDING) {
8046            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8047        } else {
8048            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8049        }
8050        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8051    }
8052    lock.unlock();
8053    if (!skipCall)
8054        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8055                                                                 dstOffset, stride, flags);
8056}
8057
8058VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8059                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8060                                            const void *pValues) {
8061    bool skipCall = false;
8062    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8063    std::unique_lock<std::mutex> lock(global_lock);
8064    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8065    if (pCB) {
8066        if (pCB->state == CB_RECORDING) {
8067            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8068        } else {
8069            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8070        }
8071    }
8072    skipCall |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8073    if (0 == stageFlags) {
8074        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8075                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
8076    }
8077
8078    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
8079    auto pipeline_layout = getPipelineLayout(dev_data, layout);
8080    if (!pipeline_layout) {
8081        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8082                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
8083                            (uint64_t)layout);
8084    } else {
8085        // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
8086        // contained in the pipeline ranges.
8087        // Build a {start, end} span list for ranges with matching stage flags.
8088        const auto &ranges = pipeline_layout->pushConstantRanges;
8089        struct span {
8090            uint32_t start;
8091            uint32_t end;
8092        };
8093        std::vector<span> spans;
8094        spans.reserve(ranges.size());
8095        for (const auto &iter : ranges) {
8096            if (iter.stageFlags == stageFlags) {
8097                spans.push_back({iter.offset, iter.offset + iter.size});
8098            }
8099        }
8100        if (spans.size() == 0) {
8101            // There were no ranges that matched the stageFlags.
8102            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8103                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8104                                "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
8105                                "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
8106                                (uint32_t)stageFlags, (uint64_t)layout);
8107        } else {
8108            // Sort span list by start value.
8109            struct comparer {
8110                bool operator()(struct span i, struct span j) { return i.start < j.start; }
8111            } my_comparer;
8112            std::sort(spans.begin(), spans.end(), my_comparer);
8113
8114            // Examine two spans at a time.
8115            std::vector<span>::iterator current = spans.begin();
8116            std::vector<span>::iterator next = current + 1;
8117            while (next != spans.end()) {
8118                if (current->end < next->start) {
8119                    // There is a gap; cannot coalesce. Move to the next two spans.
8120                    ++current;
8121                    ++next;
8122                } else {
8123                    // Coalesce the two spans.  The start of the next span
8124                    // is within the current span, so pick the larger of
8125                    // the end values to extend the current span.
8126                    // Then delete the next span and set next to the span after it.
8127                    current->end = max(current->end, next->end);
8128                    next = spans.erase(next);
8129                }
8130            }
8131
8132            // Now we can check if the incoming range is within any of the spans.
8133            bool contained_in_a_range = false;
8134            for (uint32_t i = 0; i < spans.size(); ++i) {
8135                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
8136                    contained_in_a_range = true;
8137                    break;
8138                }
8139            }
8140            if (!contained_in_a_range) {
8141                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8142                                    __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8143                                    "vkCmdPushConstants() Push constant range [%d, %d) "
8144                                    "with stageFlags = 0x%" PRIx32 " "
8145                                    "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
8146                                    offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
8147            }
8148        }
8149    }
8150    lock.unlock();
8151    if (!skipCall)
8152        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8153}
8154
8155VKAPI_ATTR void VKAPI_CALL
8156CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8157    bool skipCall = false;
8158    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8159    std::unique_lock<std::mutex> lock(global_lock);
8160    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8161    if (pCB) {
8162        QueryObject query = {queryPool, slot};
8163        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8164        pCB->queryUpdates.push_back(queryUpdate);
8165        if (pCB->state == CB_RECORDING) {
8166            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8167        } else {
8168            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8169        }
8170    }
8171    lock.unlock();
8172    if (!skipCall)
8173        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8174}
8175
8176VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8177                                                 const VkAllocationCallbacks *pAllocator,
8178                                                 VkFramebuffer *pFramebuffer) {
8179    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8180    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8181    if (VK_SUCCESS == result) {
8182        // Shadow create info and store in map
8183        std::lock_guard<std::mutex> lock(global_lock);
8184
8185        auto & fbNode = dev_data->frameBufferMap[*pFramebuffer];
8186        fbNode.createInfo = *pCreateInfo;
8187        if (pCreateInfo->pAttachments) {
8188            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
8189            memcpy(attachments,
8190                   pCreateInfo->pAttachments,
8191                   pCreateInfo->attachmentCount * sizeof(VkImageView));
8192            fbNode.createInfo.pAttachments = attachments;
8193        }
8194        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8195            VkImageView view = pCreateInfo->pAttachments[i];
8196            auto view_data = dev_data->imageViewMap.find(view);
8197            if (view_data == dev_data->imageViewMap.end()) {
8198                continue;
8199            }
8200            MT_FB_ATTACHMENT_INFO fb_info;
8201            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8202                                        &fb_info.mem);
8203            fb_info.image = view_data->second.image;
8204            fbNode.attachments.push_back(fb_info);
8205        }
8206    }
8207    return result;
8208}
8209
8210static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8211                           std::unordered_set<uint32_t> &processed_nodes) {
8212    // If we have already checked this node we have not found a dependency path so return false.
8213    if (processed_nodes.count(index))
8214        return false;
8215    processed_nodes.insert(index);
8216    const DAGNode &node = subpass_to_node[index];
8217    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8218    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8219        for (auto elem : node.prev) {
8220            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8221                return true;
8222        }
8223    } else {
8224        return true;
8225    }
8226    return false;
8227}
8228
8229static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8230                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8231    bool result = true;
8232    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8233    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8234        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8235            continue;
8236        const DAGNode &node = subpass_to_node[subpass];
8237        // Check for a specified dependency between the two nodes. If one exists we are done.
8238        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8239        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8240        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8241            // If no dependency exits an implicit dependency still might. If so, warn and if not throw an error.
8242            std::unordered_set<uint32_t> processed_nodes;
8243            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8244                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
8245                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8246                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8247                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
8248                                     subpass, dependent_subpasses[k]);
8249            } else {
8250                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8251                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8252                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8253                                     dependent_subpasses[k]);
8254                result = false;
8255            }
8256        }
8257    }
8258    return result;
8259}
8260
8261static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8262                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8263    const DAGNode &node = subpass_to_node[index];
8264    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8265    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8266    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8267        if (attachment == subpass.pColorAttachments[j].attachment)
8268            return true;
8269    }
8270    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8271        if (attachment == subpass.pDepthStencilAttachment->attachment)
8272            return true;
8273    }
8274    bool result = false;
8275    // Loop through previous nodes and see if any of them write to the attachment.
8276    for (auto elem : node.prev) {
8277        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8278    }
8279    // If the attachment was written to by a previous node than this node needs to preserve it.
8280    if (result && depth > 0) {
8281        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8282        bool has_preserved = false;
8283        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8284            if (subpass.pPreserveAttachments[j] == attachment) {
8285                has_preserved = true;
8286                break;
8287            }
8288        }
8289        if (!has_preserved) {
8290            skip_call |=
8291                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8292                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8293                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8294        }
8295    }
8296    return result;
8297}
8298
8299template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8300    return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) ||
8301           ((offset1 > offset2) && (offset1 < (offset2 + size2)));
8302}
8303
8304bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8305    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8306            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8307}
8308
8309static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
8310                                 RENDER_PASS_NODE const * renderPass) {
8311    bool skip_call = false;
8312    const VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
8313    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
8314    auto const & subpass_to_node = renderPass->subpassToNode;
8315    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8316    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8317    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8318    // Find overlapping attachments
8319    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8320        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8321            VkImageView viewi = pFramebufferInfo->pAttachments[i];
8322            VkImageView viewj = pFramebufferInfo->pAttachments[j];
8323            if (viewi == viewj) {
8324                overlapping_attachments[i].push_back(j);
8325                overlapping_attachments[j].push_back(i);
8326                continue;
8327            }
8328            auto view_data_i = my_data->imageViewMap.find(viewi);
8329            auto view_data_j = my_data->imageViewMap.find(viewj);
8330            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
8331                continue;
8332            }
8333            if (view_data_i->second.image == view_data_j->second.image &&
8334                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
8335                overlapping_attachments[i].push_back(j);
8336                overlapping_attachments[j].push_back(i);
8337                continue;
8338            }
8339            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
8340            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
8341            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
8342                continue;
8343            }
8344            if (image_data_i->second.mem == image_data_j->second.mem &&
8345                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
8346                                   image_data_j->second.memSize)) {
8347                overlapping_attachments[i].push_back(j);
8348                overlapping_attachments[j].push_back(i);
8349            }
8350        }
8351    }
8352    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8353        uint32_t attachment = i;
8354        for (auto other_attachment : overlapping_attachments[i]) {
8355            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8356                skip_call |=
8357                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8358                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8359                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8360                            attachment, other_attachment);
8361            }
8362            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8363                skip_call |=
8364                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8365                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8366                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8367                            other_attachment, attachment);
8368            }
8369        }
8370    }
8371    // Find for each attachment the subpasses that use them.
8372    unordered_set<uint32_t> attachmentIndices;
8373    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8374        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8375        attachmentIndices.clear();
8376        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8377            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8378            input_attachment_to_subpass[attachment].push_back(i);
8379            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8380                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8381            }
8382        }
8383        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8384            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8385            output_attachment_to_subpass[attachment].push_back(i);
8386            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8387                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8388            }
8389            attachmentIndices.insert(attachment);
8390        }
8391        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8392            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8393            output_attachment_to_subpass[attachment].push_back(i);
8394            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8395                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8396            }
8397
8398            if (attachmentIndices.count(attachment)) {
8399                skip_call |=
8400                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8401                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8402                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8403                            attachment, i);
8404            }
8405        }
8406    }
8407    // If there is a dependency needed make sure one exists
8408    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8409        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8410        // If the attachment is an input then all subpasses that output must have a dependency relationship
8411        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8412            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
8413            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8414        }
8415        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8416        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8417            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
8418            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8419            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8420        }
8421        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8422            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8423            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8424            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8425        }
8426    }
8427    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
8428    // written.
8429    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8430        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8431        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8432            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
8433        }
8434    }
8435    return skip_call;
8436}
8437// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
8438// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that
8439// READ_ONLY layout attachments don't have CLEAR as their loadOp.
8440static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
8441                                                  const uint32_t attachment,
8442                                                  const VkAttachmentDescription &attachment_description) {
8443    bool skip_call = false;
8444    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
8445    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
8446        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
8447            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
8448            skip_call |=
8449                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8450                        VkDebugReportObjectTypeEXT(0), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8451                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
8452        }
8453    }
8454    return skip_call;
8455}
8456
8457static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
8458    bool skip = false;
8459
8460    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8461        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8462        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8463            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
8464                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
8465                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8466                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8467                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8468                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8469                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
8470                } else {
8471                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8472                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8473                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
8474                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
8475                }
8476            }
8477            auto attach_index = subpass.pInputAttachments[j].attachment;
8478            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index,
8479                                                          pCreateInfo->pAttachments[attach_index]);
8480        }
8481        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8482            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
8483                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8484                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8485                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8486                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8487                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
8488                } else {
8489                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8490                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8491                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
8492                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
8493                }
8494            }
8495            auto attach_index = subpass.pColorAttachments[j].attachment;
8496            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index,
8497                                                          pCreateInfo->pAttachments[attach_index]);
8498        }
8499        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8500            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
8501                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
8502                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8503                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8504                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8505                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8506                } else {
8507                    skip |=
8508                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8509                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8510                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8511                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8512                }
8513            }
8514            auto attach_index = subpass.pDepthStencilAttachment->attachment;
8515            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
8516                                                          attach_index, pCreateInfo->pAttachments[attach_index]);
8517        }
8518    }
8519    return skip;
8520}
8521
8522static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8523                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8524    bool skip_call = false;
8525    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8526        DAGNode &subpass_node = subpass_to_node[i];
8527        subpass_node.pass = i;
8528    }
8529    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8530        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8531        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8532            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8533            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8534                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
8535                                 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
8536        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8537            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8538                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8539        } else if (dependency.srcSubpass == dependency.dstSubpass) {
8540            has_self_dependency[dependency.srcSubpass] = true;
8541        }
8542        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8543            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8544        }
8545        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8546            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8547        }
8548    }
8549    return skip_call;
8550}
8551
8552
8553VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8554                                                  const VkAllocationCallbacks *pAllocator,
8555                                                  VkShaderModule *pShaderModule) {
8556    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8557    bool skip_call = false;
8558
8559    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
8560    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
8561    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
8562    spv_diagnostic diag = nullptr;
8563
8564    auto result = spvValidate(ctx, &binary, &diag);
8565    if (result != SPV_SUCCESS) {
8566        skip_call |= log_msg(my_data->report_data,
8567                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
8568                             VkDebugReportObjectTypeEXT(0), 0,
8569                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
8570                             diag && diag->error ? diag->error : "(no error text)");
8571    }
8572
8573    spvDiagnosticDestroy(diag);
8574    spvContextDestroy(ctx);
8575
8576    if (skip_call)
8577        return VK_ERROR_VALIDATION_FAILED_EXT;
8578
8579    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8580
8581    if (res == VK_SUCCESS) {
8582        std::lock_guard<std::mutex> lock(global_lock);
8583        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
8584    }
8585    return res;
8586}
8587
8588VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8589                                                const VkAllocationCallbacks *pAllocator,
8590                                                VkRenderPass *pRenderPass) {
8591    bool skip_call = false;
8592    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8593    // Create DAG
8594    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8595    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8596    {
8597        std::lock_guard<std::mutex> lock(global_lock);
8598        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
8599        // Validate
8600        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
8601        if (skip_call) {
8602            return VK_ERROR_VALIDATION_FAILED_EXT;
8603        }
8604    }
8605    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8606    if (VK_SUCCESS == result) {
8607        // TODOSC : Merge in tracking of renderpass from shader_checker
8608        // Shadow create info and store in map
8609        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
8610        if (pCreateInfo->pAttachments) {
8611            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
8612            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
8613                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
8614        }
8615        if (pCreateInfo->pSubpasses) {
8616            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
8617            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
8618
8619            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
8620                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
8621                const uint32_t attachmentCount = subpass->inputAttachmentCount +
8622                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
8623                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
8624                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
8625
8626                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
8627                subpass->pInputAttachments = attachments;
8628                attachments += subpass->inputAttachmentCount;
8629
8630                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8631                subpass->pColorAttachments = attachments;
8632                attachments += subpass->colorAttachmentCount;
8633
8634                if (subpass->pResolveAttachments) {
8635                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8636                    subpass->pResolveAttachments = attachments;
8637                    attachments += subpass->colorAttachmentCount;
8638                }
8639
8640                if (subpass->pDepthStencilAttachment) {
8641                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
8642                    subpass->pDepthStencilAttachment = attachments;
8643                    attachments += 1;
8644                }
8645
8646                memcpy(attachments, subpass->pPreserveAttachments, sizeof(attachments[0]) * subpass->preserveAttachmentCount);
8647                subpass->pPreserveAttachments = &attachments->attachment;
8648            }
8649        }
8650        if (pCreateInfo->pDependencies) {
8651            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
8652            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
8653                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
8654        }
8655
8656        auto render_pass = new RENDER_PASS_NODE(localRPCI);
8657        render_pass->renderPass = *pRenderPass;
8658        render_pass->hasSelfDependency = has_self_dependency;
8659        render_pass->subpassToNode = subpass_to_node;
8660#if MTMERGESOURCE
8661        // MTMTODO : Merge with code from above to eliminate duplication
8662        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8663            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
8664            MT_PASS_ATTACHMENT_INFO pass_info;
8665            pass_info.load_op = desc.loadOp;
8666            pass_info.store_op = desc.storeOp;
8667            pass_info.attachment = i;
8668            render_pass->attachments.push_back(pass_info);
8669        }
8670        // TODO: Maybe fill list and then copy instead of locking
8671        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
8672        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
8673            render_pass->attachment_first_layout;
8674        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8675            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8676            if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8677                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8678                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8679                                     "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
8680            }
8681            for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8682                uint32_t attachment = subpass.pPreserveAttachments[j];
8683                if (attachment >= pCreateInfo->attachmentCount) {
8684                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8685                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8686                                         "Preserve attachment %d cannot be greater than the total number of attachments %d.",
8687                                         attachment, pCreateInfo->attachmentCount);
8688                }
8689            }
8690            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8691                uint32_t attachment;
8692                if (subpass.pResolveAttachments) {
8693                    attachment = subpass.pResolveAttachments[j].attachment;
8694                    if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) {
8695                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8696                                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8697                                             "Color attachment %d cannot be greater than the total number of attachments %d.",
8698                                             attachment, pCreateInfo->attachmentCount);
8699                        continue;
8700                    }
8701                }
8702                attachment = subpass.pColorAttachments[j].attachment;
8703                if (attachment >= pCreateInfo->attachmentCount) {
8704                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8705                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8706                                         "Color attachment %d cannot be greater than the total number of attachments %d.",
8707                                         attachment, pCreateInfo->attachmentCount);
8708                    continue;
8709                }
8710                if (attachment_first_read.count(attachment))
8711                    continue;
8712                attachment_first_read.insert(std::make_pair(attachment, false));
8713                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
8714            }
8715            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8716                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8717                if (attachment >= pCreateInfo->attachmentCount) {
8718                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8719                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8720                                         "Depth stencil attachment %d cannot be greater than the total number of attachments %d.",
8721                                         attachment, pCreateInfo->attachmentCount);
8722                    continue;
8723                }
8724                if (attachment_first_read.count(attachment))
8725                    continue;
8726                attachment_first_read.insert(std::make_pair(attachment, false));
8727                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
8728            }
8729            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8730                uint32_t attachment = subpass.pInputAttachments[j].attachment;
8731                if (attachment >= pCreateInfo->attachmentCount) {
8732                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8733                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8734                                         "Input attachment %d cannot be greater than the total number of attachments %d.",
8735                                         attachment, pCreateInfo->attachmentCount);
8736                    continue;
8737                }
8738                if (attachment_first_read.count(attachment))
8739                    continue;
8740                attachment_first_read.insert(std::make_pair(attachment, true));
8741                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
8742            }
8743        }
8744#endif
8745        {
8746            std::lock_guard<std::mutex> lock(global_lock);
8747            dev_data->renderPassMap[*pRenderPass] = render_pass;
8748        }
8749    }
8750    return result;
8751}
8752// Free the renderpass shadow
8753static void deleteRenderPasses(layer_data *my_data) {
8754    if (my_data->renderPassMap.size() <= 0)
8755        return;
8756    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
8757        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
8758        delete[] pRenderPassInfo->pAttachments;
8759        if (pRenderPassInfo->pSubpasses) {
8760            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
8761                // Attachements are all allocated in a block, so just need to
8762                //  find the first non-null one to delete
8763                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
8764                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
8765                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
8766                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
8767                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
8768                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
8769                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
8770                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
8771                }
8772            }
8773            delete[] pRenderPassInfo->pSubpasses;
8774        }
8775        delete[] pRenderPassInfo->pDependencies;
8776        delete pRenderPassInfo;
8777        delete (*ii).second;
8778    }
8779    my_data->renderPassMap.clear();
8780}
8781
8782static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
8783    bool skip_call = false;
8784    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
8785    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
8786    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
8787        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8788                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
8789                                                                 "with a different number of attachments.");
8790    }
8791    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
8792        const VkImageView &image_view = framebufferInfo.pAttachments[i];
8793        auto image_data = dev_data->imageViewMap.find(image_view);
8794        assert(image_data != dev_data->imageViewMap.end());
8795        const VkImage &image = image_data->second.image;
8796        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
8797        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
8798                                             pRenderPassInfo->pAttachments[i].initialLayout};
8799        // TODO: Do not iterate over every possibility - consolidate where possible
8800        for (uint32_t j = 0; j < subRange.levelCount; j++) {
8801            uint32_t level = subRange.baseMipLevel + j;
8802            for (uint32_t k = 0; k < subRange.layerCount; k++) {
8803                uint32_t layer = subRange.baseArrayLayer + k;
8804                VkImageSubresource sub = {subRange.aspectMask, level, layer};
8805                IMAGE_CMD_BUF_LAYOUT_NODE node;
8806                if (!FindLayout(pCB, image, sub, node)) {
8807                    SetLayout(pCB, image, sub, newNode);
8808                    continue;
8809                }
8810                if (newNode.layout != node.layout) {
8811                    skip_call |=
8812                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8813                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
8814                                                                    "where the "
8815                                                                    "initial layout is %s and the layout of the attachment at the "
8816                                                                    "start of the render pass is %s. The layouts must match.",
8817                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
8818                }
8819            }
8820        }
8821    }
8822    return skip_call;
8823}
8824
8825static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
8826                                     const int subpass_index) {
8827    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
8828    if (!renderPass)
8829        return;
8830
8831    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
8832    if (!framebuffer)
8833        return;
8834
8835    const VkFramebufferCreateInfo &framebufferInfo = framebuffer->createInfo;
8836    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
8837    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8838        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
8839        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
8840    }
8841    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8842        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
8843        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
8844    }
8845    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8846        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
8847        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
8848    }
8849}
8850
8851static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
8852    bool skip_call = false;
8853    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
8854        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8855                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
8856                             cmd_name.c_str());
8857    }
8858    return skip_call;
8859}
8860
8861static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
8862    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
8863    if (!renderPass)
8864        return;
8865
8866    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
8867    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
8868    if (!framebuffer)
8869        return;
8870
8871    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
8872        const VkImageView &image_view = framebuffer->createInfo.pAttachments[i];
8873        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
8874    }
8875}
8876
8877static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
8878    bool skip_call = false;
8879    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
8880    if (pRenderPassBegin->renderArea.offset.x < 0 ||
8881        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
8882        pRenderPassBegin->renderArea.offset.y < 0 ||
8883        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
8884        skip_call |= static_cast<bool>(log_msg(
8885            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8886            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
8887            "Cannot execute a render pass with renderArea not within the bound of the "
8888            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
8889            "height %d.",
8890            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
8891            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
8892    }
8893    return skip_call;
8894}
8895
8896VKAPI_ATTR void VKAPI_CALL
8897CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
8898    bool skipCall = false;
8899    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8900    std::unique_lock<std::mutex> lock(global_lock);
8901    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8902    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
8903    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
8904    if (pCB) {
8905        if (renderPass) {
8906#if MTMERGE
8907            pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
8908            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
8909                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
8910                if (renderPass->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
8911                    std::function<bool()> function = [=]() {
8912                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
8913                        return false;
8914                    };
8915                    pCB->validate_functions.push_back(function);
8916                } else if (renderPass->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
8917                    std::function<bool()> function = [=]() {
8918                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
8919                        return false;
8920                    };
8921                    pCB->validate_functions.push_back(function);
8922                } else if (renderPass->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
8923                    std::function<bool()> function = [=]() {
8924                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
8925                    };
8926                    pCB->validate_functions.push_back(function);
8927                }
8928                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
8929                    std::function<bool()> function = [=]() {
8930                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
8931                    };
8932                    pCB->validate_functions.push_back(function);
8933                }
8934            }
8935#endif
8936            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
8937            skipCall |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin);
8938            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
8939            if (renderPass) {
8940                skipCall |= ValidateDependencies(dev_data, framebuffer, renderPass);
8941            }
8942            pCB->activeRenderPass = renderPass;
8943            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
8944            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
8945            // This is a shallow copy as that is all that is needed for now
8946            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
8947            pCB->activeSubpass = 0;
8948            pCB->activeSubpassContents = contents;
8949            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
8950            // Connect this framebuffer to this cmdBuffer
8951            framebuffer->referencingCmdBuffers.insert(pCB->commandBuffer);
8952        } else {
8953            skipCall |=
8954                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8955                            DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
8956        }
8957    }
8958    lock.unlock();
8959    if (!skipCall) {
8960        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
8961    }
8962}
8963
8964VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
8965    bool skipCall = false;
8966    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8967    std::unique_lock<std::mutex> lock(global_lock);
8968    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8969    if (pCB) {
8970        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
8971        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
8972        pCB->activeSubpass++;
8973        pCB->activeSubpassContents = contents;
8974        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
8975        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
8976    }
8977    lock.unlock();
8978    if (!skipCall)
8979        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
8980}
8981
8982VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
8983    bool skipCall = false;
8984    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8985    std::unique_lock<std::mutex> lock(global_lock);
8986    auto pCB = getCBNode(dev_data, commandBuffer);
8987    if (pCB) {
8988        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
8989        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
8990        if (pRPNode) {
8991            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
8992                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
8993                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
8994                    std::function<bool()> function = [=]() {
8995                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
8996                        return false;
8997                    };
8998                    pCB->validate_functions.push_back(function);
8999                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9000                    std::function<bool()> function = [=]() {
9001                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9002                        return false;
9003                    };
9004                    pCB->validate_functions.push_back(function);
9005                }
9006            }
9007        }
9008        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass");
9009        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9010        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9011        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
9012        pCB->activeRenderPass = nullptr;
9013        pCB->activeSubpass = 0;
9014        pCB->activeFramebuffer = VK_NULL_HANDLE;
9015    }
9016    lock.unlock();
9017    if (!skipCall)
9018        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9019}
9020
9021static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9022                                        RENDER_PASS_NODE const *primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9023                                        const char *msg) {
9024    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9025                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9026                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
9027                   " that is not compatible with the current render pass 0x%" PRIx64 "."
9028                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9029                   (void *)secondaryBuffer, (uint64_t)(secondaryPass->renderPass), (uint64_t)(primaryPass->renderPass), primaryAttach, secondaryAttach,
9030                   msg);
9031}
9032
9033static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9034                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9035                                            uint32_t secondaryAttach, bool is_multi) {
9036    bool skip_call = false;
9037    if (primaryPass->pCreateInfo->attachmentCount <= primaryAttach) {
9038        primaryAttach = VK_ATTACHMENT_UNUSED;
9039    }
9040    if (secondaryPass->pCreateInfo->attachmentCount <= secondaryAttach) {
9041        secondaryAttach = VK_ATTACHMENT_UNUSED;
9042    }
9043    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9044        return skip_call;
9045    }
9046    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9047        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9048                                                 secondaryAttach, "The first is unused while the second is not.");
9049        return skip_call;
9050    }
9051    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9052        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9053                                                 secondaryAttach, "The second is unused while the first is not.");
9054        return skip_call;
9055    }
9056    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].format !=
9057        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].format) {
9058        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9059                                                 secondaryAttach, "They have different formats.");
9060    }
9061    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].samples !=
9062        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].samples) {
9063        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9064                                                 secondaryAttach, "They have different samples.");
9065    }
9066    if (is_multi &&
9067        primaryPass->pCreateInfo->pAttachments[primaryAttach].flags !=
9068            secondaryPass->pCreateInfo->pAttachments[secondaryAttach].flags) {
9069        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9070                                                 secondaryAttach, "They have different flags.");
9071    }
9072    return skip_call;
9073}
9074
9075static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9076                                         VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass, const int subpass,
9077                                         bool is_multi) {
9078    bool skip_call = false;
9079    const VkSubpassDescription &primary_desc = primaryPass->pCreateInfo->pSubpasses[subpass];
9080    const VkSubpassDescription &secondary_desc = secondaryPass->pCreateInfo->pSubpasses[subpass];
9081    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9082    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9083        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9084        if (i < primary_desc.inputAttachmentCount) {
9085            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9086        }
9087        if (i < secondary_desc.inputAttachmentCount) {
9088            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9089        }
9090        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9091                                                     secondaryPass, secondary_input_attach, is_multi);
9092    }
9093    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9094    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9095        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9096        if (i < primary_desc.colorAttachmentCount) {
9097            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9098        }
9099        if (i < secondary_desc.colorAttachmentCount) {
9100            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9101        }
9102        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9103                                                     secondaryPass, secondary_color_attach, is_multi);
9104        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9105        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9106            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9107        }
9108        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9109            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9110        }
9111        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9112                                                     secondaryPass, secondary_resolve_attach, is_multi);
9113    }
9114    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9115    if (primary_desc.pDepthStencilAttachment) {
9116        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9117    }
9118    if (secondary_desc.pDepthStencilAttachment) {
9119        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9120    }
9121    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9122                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9123    return skip_call;
9124}
9125
9126static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9127                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9128    bool skip_call = false;
9129    // Early exit if renderPass objects are identical (and therefore compatible)
9130    if (primaryPass == secondaryPass)
9131        return skip_call;
9132    auto primary_render_pass = getRenderPass(dev_data, primaryPass);
9133    auto secondary_render_pass = getRenderPass(dev_data, secondaryPass);
9134    if (!primary_render_pass) {
9135        skip_call |=
9136            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9137                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9138                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9139                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9140        return skip_call;
9141    }
9142    if (!secondary_render_pass) {
9143        skip_call |=
9144            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9145                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9146                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9147                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9148        return skip_call;
9149    }
9150    if (primary_render_pass->pCreateInfo->subpassCount != secondary_render_pass->pCreateInfo->subpassCount) {
9151        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9152                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9153                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
9154                             " that is not compatible with the current render pass 0x%" PRIx64 "."
9155                             "They have a different number of subpasses.",
9156                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9157        return skip_call;
9158    }
9159    auto subpassCount = primary_render_pass->pCreateInfo->subpassCount;
9160    for (uint32_t i = 0; i < subpassCount; ++i) {
9161        skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primary_render_pass, secondaryBuffer,
9162                                                  secondary_render_pass, i, subpassCount > 1);
9163    }
9164    return skip_call;
9165}
9166
9167static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9168                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9169    bool skip_call = false;
9170    if (!pSubCB->beginInfo.pInheritanceInfo) {
9171        return skip_call;
9172    }
9173    VkFramebuffer primary_fb = pCB->activeFramebuffer;
9174    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9175    if (secondary_fb != VK_NULL_HANDLE) {
9176        if (primary_fb != secondary_fb) {
9177            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9178                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9179                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64
9180                                 " that is not compatible with the current framebuffer 0x%" PRIx64 ".",
9181                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9182        }
9183        auto fb = getFramebuffer(dev_data, secondary_fb);
9184        if (!fb) {
9185            skip_call |=
9186                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9187                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9188                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
9189                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9190            return skip_call;
9191        }
9192        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->createInfo.renderPass,
9193                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9194    }
9195    return skip_call;
9196}
9197
9198static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9199    bool skipCall = false;
9200    unordered_set<int> activeTypes;
9201    for (auto queryObject : pCB->activeQueries) {
9202        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9203        if (queryPoolData != dev_data->queryPoolMap.end()) {
9204            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9205                pSubCB->beginInfo.pInheritanceInfo) {
9206                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9207                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9208                    skipCall |= log_msg(
9209                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9210                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9211                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9212                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics is being queried so the command "
9213                        "buffer must have all bits set on the queryPool.",
9214                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9215                }
9216            }
9217            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9218        }
9219    }
9220    for (auto queryObject : pSubCB->startedQueries) {
9221        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9222        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9223            skipCall |=
9224                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9225                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9226                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9227                        "which has invalid active query pool 0x%" PRIx64 "of type %d but a query of that type has been started on "
9228                        "secondary Cmd Buffer 0x%p.",
9229                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9230                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9231        }
9232    }
9233    return skipCall;
9234}
9235
9236VKAPI_ATTR void VKAPI_CALL
9237CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9238    bool skipCall = false;
9239    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9240    std::unique_lock<std::mutex> lock(global_lock);
9241    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9242    if (pCB) {
9243        GLOBAL_CB_NODE *pSubCB = NULL;
9244        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9245            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9246            if (!pSubCB) {
9247                skipCall |=
9248                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9249                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9250                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
9251                            (void *)pCommandBuffers[i], i);
9252            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9253                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9254                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9255                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
9256                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9257                                    (void *)pCommandBuffers[i], i);
9258            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9259                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9260                    skipCall |= log_msg(
9261                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9262                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9263                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
9264                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9265                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
9266                } else {
9267                    // Make sure render pass is compatible with parent command buffer pass if has continue
9268                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->renderPass, pCommandBuffers[i],
9269                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9270                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9271                }
9272                string errorString = "";
9273                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->renderPass,
9274                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9275                    skipCall |= log_msg(
9276                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9277                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9278                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
9279                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
9280                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9281                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
9282                }
9283                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
9284                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
9285                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9286                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9287                        skipCall |= log_msg(
9288                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9289                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9290                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64
9291                            ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").",
9292                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9293                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass);
9294                    }
9295                }
9296            }
9297            // TODO(mlentine): Move more logic into this method
9298            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9299            skipCall |= validateCommandBufferState(dev_data, pSubCB);
9300            // Secondary cmdBuffers are considered pending execution starting w/
9301            // being recorded
9302            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9303                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9304                    skipCall |= log_msg(
9305                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9306                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9307                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9308                        "set!",
9309                        (uint64_t)(pCB->commandBuffer));
9310                }
9311                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9312                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9313                    skipCall |= log_msg(
9314                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9315                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9316                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
9317                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9318                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9319                                          "set, even though it does.",
9320                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9321                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9322                }
9323            }
9324            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9325                skipCall |=
9326                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9327                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9328                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9329                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
9330                            "flight and inherited queries not "
9331                            "supported on this device.",
9332                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
9333            }
9334            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9335            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9336            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9337        }
9338        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteComands");
9339        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteComands()");
9340    }
9341    lock.unlock();
9342    if (!skipCall)
9343        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9344}
9345
9346static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9347    bool skip_call = false;
9348    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9349    auto mem_data = dev_data->memObjMap.find(mem);
9350    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
9351        std::vector<VkImageLayout> layouts;
9352        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
9353            for (auto layout : layouts) {
9354                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9355                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9356                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9357                                                                                         "GENERAL or PREINITIALIZED are supported.",
9358                                         string_VkImageLayout(layout));
9359                }
9360            }
9361        }
9362    }
9363    return skip_call;
9364}
9365
9366VKAPI_ATTR VkResult VKAPI_CALL
9367MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9368    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9369
9370    bool skip_call = false;
9371    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9372    std::unique_lock<std::mutex> lock(global_lock);
9373#if MTMERGESOURCE
9374    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
9375    if (pMemObj) {
9376        pMemObj->valid = true;
9377        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9378             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9379            skip_call =
9380                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9381                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9382                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
9383        }
9384    }
9385    skip_call |= validateMemRange(dev_data, mem, offset, size);
9386#endif
9387    skip_call |= ValidateMapImageLayouts(device, mem);
9388    lock.unlock();
9389
9390    if (!skip_call) {
9391        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9392        if (VK_SUCCESS == result) {
9393#if MTMERGESOURCE
9394            lock.lock();
9395            storeMemRanges(dev_data, mem, offset, size);
9396            initializeAndTrackMemory(dev_data, mem, size, ppData);
9397            lock.unlock();
9398#endif
9399        }
9400    }
9401    return result;
9402}
9403
9404VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9405    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9406    bool skipCall = false;
9407
9408    std::unique_lock<std::mutex> lock(global_lock);
9409    skipCall |= deleteMemRanges(my_data, mem);
9410    lock.unlock();
9411    if (!skipCall) {
9412        my_data->device_dispatch_table->UnmapMemory(device, mem);
9413    }
9414}
9415
9416static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
9417                                   const VkMappedMemoryRange *pMemRanges) {
9418    bool skipCall = false;
9419    for (uint32_t i = 0; i < memRangeCount; ++i) {
9420        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
9421        if (mem_element != my_data->memObjMap.end()) {
9422            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
9423                skipCall |= log_msg(
9424                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9425                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9426                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
9427                    "(" PRINTF_SIZE_T_SPECIFIER ").",
9428                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
9429            }
9430            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
9431                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
9432                 (pMemRanges[i].offset + pMemRanges[i].size))) {
9433                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9434                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9435                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
9436                                                                 ") exceeds the Memory Object's upper-bound "
9437                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
9438                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9439                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
9440            }
9441        }
9442    }
9443    return skipCall;
9444}
9445
9446static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
9447                                                     const VkMappedMemoryRange *pMemRanges) {
9448    bool skipCall = false;
9449    for (uint32_t i = 0; i < memRangeCount; ++i) {
9450        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
9451        if (mem_element != my_data->memObjMap.end()) {
9452            if (mem_element->second.pData) {
9453                VkDeviceSize size = mem_element->second.memRange.size;
9454                VkDeviceSize half_size = (size / 2);
9455                char *data = static_cast<char *>(mem_element->second.pData);
9456                for (auto j = 0; j < half_size; ++j) {
9457                    if (data[j] != NoncoherentMemoryFillValue) {
9458                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9459                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9460                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9461                                            (uint64_t)pMemRanges[i].memory);
9462                    }
9463                }
9464                for (auto j = size + half_size; j < 2 * size; ++j) {
9465                    if (data[j] != NoncoherentMemoryFillValue) {
9466                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9467                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9468                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9469                                            (uint64_t)pMemRanges[i].memory);
9470                    }
9471                }
9472                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
9473            }
9474        }
9475    }
9476    return skipCall;
9477}
9478
9479VkResult VKAPI_CALL
9480FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9481    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9482    bool skipCall = false;
9483    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9484
9485    std::unique_lock<std::mutex> lock(global_lock);
9486    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
9487    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
9488    lock.unlock();
9489    if (!skipCall) {
9490        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9491    }
9492    return result;
9493}
9494
9495VkResult VKAPI_CALL
9496InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9497    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9498    bool skipCall = false;
9499    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9500
9501    std::unique_lock<std::mutex> lock(global_lock);
9502    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
9503    lock.unlock();
9504    if (!skipCall) {
9505        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9506    }
9507    return result;
9508}
9509
9510VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9511    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9512    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9513    bool skipCall = false;
9514    std::unique_lock<std::mutex> lock(global_lock);
9515    auto image_node = dev_data->imageMap.find(image);
9516    if (image_node != dev_data->imageMap.end()) {
9517        // Track objects tied to memory
9518        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
9519        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
9520        VkMemoryRequirements memRequirements;
9521        lock.unlock();
9522        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
9523        lock.lock();
9524
9525        // Track and validate bound memory range information
9526        const auto &memEntry = dev_data->memObjMap.find(mem);
9527        if (memEntry != dev_data->memObjMap.end()) {
9528            const MEMORY_RANGE range =
9529                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, memEntry->second.imageRanges);
9530            skipCall |=
9531                validate_memory_range(dev_data, memEntry->second.bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
9532        }
9533
9534        print_mem_list(dev_data);
9535        lock.unlock();
9536        if (!skipCall) {
9537            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
9538            lock.lock();
9539            dev_data->memObjMap[mem].image = image;
9540            image_node->second.mem = mem;
9541            image_node->second.memOffset = memoryOffset;
9542            image_node->second.memSize = memRequirements.size;
9543            lock.unlock();
9544        }
9545    } else {
9546        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9547                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
9548                "vkBindImageMemory: Cannot find invalid image 0x%" PRIx64 ", has it already been deleted?",
9549                reinterpret_cast<const uint64_t &>(image));
9550    }
9551    return result;
9552}
9553
9554VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9555    bool skip_call = false;
9556    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9557    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9558    std::unique_lock<std::mutex> lock(global_lock);
9559    auto event_node = dev_data->eventMap.find(event);
9560    if (event_node != dev_data->eventMap.end()) {
9561        event_node->second.needsSignaled = false;
9562        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9563        if (event_node->second.write_in_use) {
9564            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9565                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9566                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
9567                                 reinterpret_cast<const uint64_t &>(event));
9568        }
9569    }
9570    lock.unlock();
9571    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9572    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9573    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9574    for (auto queue_data : dev_data->queueMap) {
9575        auto event_entry = queue_data.second.eventToStageMap.find(event);
9576        if (event_entry != queue_data.second.eventToStageMap.end()) {
9577            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9578        }
9579    }
9580    if (!skip_call)
9581        result = dev_data->device_dispatch_table->SetEvent(device, event);
9582    return result;
9583}
9584
9585VKAPI_ATTR VkResult VKAPI_CALL
9586QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
9587    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9588    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9589    bool skip_call = false;
9590    std::unique_lock<std::mutex> lock(global_lock);
9591    // First verify that fence is not in use
9592    if (fence != VK_NULL_HANDLE) {
9593        auto fence_data = dev_data->fenceMap.find(fence);
9594        if ((bindInfoCount != 0) && fence_data->second.in_use.load()) {
9595            skip_call |=
9596                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
9597                        reinterpret_cast<uint64_t &>(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
9598                        "Fence 0x%" PRIx64 " is already in use by another submission.", reinterpret_cast<uint64_t &>(fence));
9599        }
9600        if (!fence_data->second.needsSignaled) {
9601            skip_call |=
9602                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
9603                        reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
9604                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
9605                        reinterpret_cast<uint64_t &>(fence));
9606        }
9607        trackCommandBuffers(dev_data, queue, 0, nullptr, fence);
9608    }
9609    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9610        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9611        // Track objects tied to memory
9612        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9613            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9614                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
9615                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9616                                           "vkQueueBindSparse"))
9617                    skip_call = true;
9618            }
9619        }
9620        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9621            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9622                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
9623                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9624                                           "vkQueueBindSparse"))
9625                    skip_call = true;
9626            }
9627        }
9628        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
9629            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
9630                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
9631                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9632                                           "vkQueueBindSparse"))
9633                    skip_call = true;
9634            }
9635        }
9636        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
9637            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
9638            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9639                if (dev_data->semaphoreMap[semaphore].signaled) {
9640                    dev_data->semaphoreMap[semaphore].signaled = false;
9641                } else {
9642                    skip_call |=
9643                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9644                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9645                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
9646                                " that has no way to be signaled.",
9647                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9648                }
9649            }
9650        }
9651        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
9652            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
9653            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9654                if (dev_data->semaphoreMap[semaphore].signaled) {
9655                    skip_call =
9656                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9657                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9658                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
9659                                ", but that semaphore is already signaled.",
9660                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9661                }
9662                dev_data->semaphoreMap[semaphore].signaled = true;
9663            }
9664        }
9665    }
9666    print_mem_list(dev_data);
9667    lock.unlock();
9668
9669    if (!skip_call)
9670        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
9671
9672    return result;
9673}
9674
9675VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
9676                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
9677    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9678    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
9679    if (result == VK_SUCCESS) {
9680        std::lock_guard<std::mutex> lock(global_lock);
9681        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
9682        sNode->signaled = false;
9683        sNode->queue = VK_NULL_HANDLE;
9684        sNode->in_use.store(0);
9685    }
9686    return result;
9687}
9688
9689VKAPI_ATTR VkResult VKAPI_CALL
9690CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
9691    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9692    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
9693    if (result == VK_SUCCESS) {
9694        std::lock_guard<std::mutex> lock(global_lock);
9695        dev_data->eventMap[*pEvent].needsSignaled = false;
9696        dev_data->eventMap[*pEvent].in_use.store(0);
9697        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
9698    }
9699    return result;
9700}
9701
9702VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
9703                                                  const VkAllocationCallbacks *pAllocator,
9704                                                  VkSwapchainKHR *pSwapchain) {
9705    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9706    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
9707
9708    if (VK_SUCCESS == result) {
9709        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
9710        std::lock_guard<std::mutex> lock(global_lock);
9711        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
9712    }
9713
9714    return result;
9715}
9716
9717VKAPI_ATTR void VKAPI_CALL
9718DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
9719    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9720    bool skipCall = false;
9721
9722    std::unique_lock<std::mutex> lock(global_lock);
9723    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
9724    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
9725        if (swapchain_data->second->images.size() > 0) {
9726            for (auto swapchain_image : swapchain_data->second->images) {
9727                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
9728                if (image_sub != dev_data->imageSubresourceMap.end()) {
9729                    for (auto imgsubpair : image_sub->second) {
9730                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
9731                        if (image_item != dev_data->imageLayoutMap.end()) {
9732                            dev_data->imageLayoutMap.erase(image_item);
9733                        }
9734                    }
9735                    dev_data->imageSubresourceMap.erase(image_sub);
9736                }
9737                skipCall = clear_object_binding(dev_data, (uint64_t)swapchain_image,
9738                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
9739                dev_data->imageMap.erase(swapchain_image);
9740            }
9741        }
9742        delete swapchain_data->second;
9743        dev_data->device_extensions.swapchainMap.erase(swapchain);
9744    }
9745    lock.unlock();
9746    if (!skipCall)
9747        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
9748}
9749
9750VKAPI_ATTR VkResult VKAPI_CALL
9751GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
9752    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9753    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
9754
9755    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
9756        // This should never happen and is checked by param checker.
9757        if (!pCount)
9758            return result;
9759        std::lock_guard<std::mutex> lock(global_lock);
9760        const size_t count = *pCount;
9761        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
9762        if (!swapchain_node->images.empty()) {
9763            // TODO : Not sure I like the memcmp here, but it works
9764            const bool mismatch = (swapchain_node->images.size() != count ||
9765                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
9766            if (mismatch) {
9767                // TODO: Verify against Valid Usage section of extension
9768                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
9769                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
9770                        "vkGetSwapchainInfoKHR(0x%" PRIx64
9771                        ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data",
9772                        (uint64_t)(swapchain));
9773            }
9774        }
9775        for (uint32_t i = 0; i < *pCount; ++i) {
9776            IMAGE_LAYOUT_NODE image_layout_node;
9777            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
9778            image_layout_node.format = swapchain_node->createInfo.imageFormat;
9779            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
9780            image_node.createInfo.mipLevels = 1;
9781            image_node.createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
9782            image_node.createInfo.usage = swapchain_node->createInfo.imageUsage;
9783            image_node.createInfo.format = swapchain_node->createInfo.imageFormat;
9784            image_node.createInfo.extent.width = swapchain_node->createInfo.imageExtent.width;
9785            image_node.createInfo.extent.height = swapchain_node->createInfo.imageExtent.height;
9786            image_node.createInfo.sharingMode = swapchain_node->createInfo.imageSharingMode;
9787            image_node.valid = false;
9788            image_node.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
9789            swapchain_node->images.push_back(pSwapchainImages[i]);
9790            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
9791            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
9792            dev_data->imageLayoutMap[subpair] = image_layout_node;
9793            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
9794        }
9795    }
9796    return result;
9797}
9798
9799VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
9800    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9801    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9802    bool skip_call = false;
9803
9804    if (pPresentInfo) {
9805        std::lock_guard<std::mutex> lock(global_lock);
9806        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
9807            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
9808            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9809                if (dev_data->semaphoreMap[semaphore].signaled) {
9810                    dev_data->semaphoreMap[semaphore].signaled = false;
9811                } else {
9812                    skip_call |=
9813                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9814                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9815                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
9816                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
9817                }
9818            }
9819        }
9820        VkDeviceMemory mem;
9821        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
9822            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
9823            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
9824                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
9825                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
9826#if MTMERGESOURCE
9827                skip_call |=
9828                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
9829                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
9830#endif
9831                vector<VkImageLayout> layouts;
9832                if (FindLayouts(dev_data, image, layouts)) {
9833                    for (auto layout : layouts) {
9834                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
9835                            skip_call |=
9836                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
9837                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9838                                        "Images passed to present must be in layout "
9839                                        "PRESENT_SOURCE_KHR but is in %s",
9840                                        string_VkImageLayout(layout));
9841                        }
9842                    }
9843                }
9844            }
9845        }
9846    }
9847
9848    if (!skip_call)
9849        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
9850
9851    return result;
9852}
9853
9854VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
9855                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
9856    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9857    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9858    bool skipCall = false;
9859
9860    std::unique_lock<std::mutex> lock(global_lock);
9861    if (semaphore != VK_NULL_HANDLE &&
9862        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
9863        if (dev_data->semaphoreMap[semaphore].signaled) {
9864            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
9865                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9866                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
9867        }
9868        dev_data->semaphoreMap[semaphore].signaled = true;
9869    }
9870    auto fence_data = dev_data->fenceMap.find(fence);
9871    if (fence_data != dev_data->fenceMap.end()) {
9872        fence_data->second.swapchain = swapchain;
9873    }
9874    lock.unlock();
9875
9876    if (!skipCall) {
9877        result =
9878            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
9879    }
9880
9881    return result;
9882}
9883
9884VKAPI_ATTR VkResult VKAPI_CALL
9885CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
9886                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
9887    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
9888    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
9889    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
9890    if (VK_SUCCESS == res) {
9891        std::lock_guard<std::mutex> lock(global_lock);
9892        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
9893    }
9894    return res;
9895}
9896
9897VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
9898                                                         VkDebugReportCallbackEXT msgCallback,
9899                                                         const VkAllocationCallbacks *pAllocator) {
9900    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
9901    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
9902    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
9903    std::lock_guard<std::mutex> lock(global_lock);
9904    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
9905}
9906
9907VKAPI_ATTR void VKAPI_CALL
9908DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
9909                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
9910    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
9911    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
9912                                                            pMsg);
9913}
9914
9915VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
9916                                                                  const char *pLayerName, uint32_t *pCount,
9917                                                                  VkExtensionProperties *pProperties) {
9918    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
9919        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
9920
9921    assert(physicalDevice);
9922
9923    dispatch_key key = get_dispatch_key(physicalDevice);
9924    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
9925    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
9926}
9927
9928static PFN_vkVoidFunction
9929intercept_core_instance_command(const char *name);
9930
9931static PFN_vkVoidFunction
9932intercept_core_device_command(const char *name);
9933
9934static PFN_vkVoidFunction
9935intercept_khr_swapchain_command(const char *name, VkDevice dev);
9936
9937VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
9938    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
9939    if (proc)
9940        return proc;
9941
9942    assert(dev);
9943
9944    proc = intercept_khr_swapchain_command(funcName, dev);
9945    if (proc)
9946        return proc;
9947
9948    layer_data *dev_data;
9949    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
9950
9951    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
9952    {
9953        if (pTable->GetDeviceProcAddr == NULL)
9954            return NULL;
9955        return pTable->GetDeviceProcAddr(dev, funcName);
9956    }
9957}
9958
9959VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
9960    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
9961    if (!proc)
9962        proc = intercept_core_device_command(funcName);
9963    if (!proc)
9964        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
9965    if (proc)
9966        return proc;
9967
9968    assert(instance);
9969
9970    layer_data *my_data;
9971    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
9972    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
9973    if (proc)
9974        return proc;
9975
9976    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
9977    if (pTable->GetInstanceProcAddr == NULL)
9978        return NULL;
9979    return pTable->GetInstanceProcAddr(instance, funcName);
9980}
9981
9982static PFN_vkVoidFunction
9983intercept_core_instance_command(const char *name) {
9984    static const struct {
9985        const char *name;
9986        PFN_vkVoidFunction proc;
9987    } core_instance_commands[] = {
9988        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
9989        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
9990        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
9991        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
9992        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
9993        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
9994    };
9995
9996    // we should never be queried for these commands
9997    assert(strcmp(name, "vkEnumerateInstanceLayerProperties") &&
9998           strcmp(name, "vkEnumerateInstanceExtensionProperties") &&
9999           strcmp(name, "vkEnumerateDeviceLayerProperties"));
10000
10001    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
10002        if (!strcmp(core_instance_commands[i].name, name))
10003            return core_instance_commands[i].proc;
10004    }
10005
10006    return nullptr;
10007}
10008
10009static PFN_vkVoidFunction
10010intercept_core_device_command(const char *name) {
10011    static const struct {
10012        const char *name;
10013        PFN_vkVoidFunction proc;
10014    } core_device_commands[] = {
10015        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
10016        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
10017        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
10018        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
10019        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
10020        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
10021        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
10022        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
10023        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
10024        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
10025        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
10026        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
10027        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
10028        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
10029        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
10030        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
10031        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
10032        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
10033        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
10034        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
10035        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
10036        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
10037        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
10038        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
10039        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
10040        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
10041        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
10042        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
10043        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
10044        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
10045        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
10046        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
10047        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
10048        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
10049        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
10050        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
10051        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
10052        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
10053        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
10054        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
10055        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
10056        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
10057        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
10058        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
10059        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
10060        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
10061        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
10062        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
10063        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
10064        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
10065        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
10066        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
10067        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
10068        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
10069        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
10070        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
10071        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
10072        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
10073        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
10074        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
10075        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
10076        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
10077        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
10078        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
10079        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
10080        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
10081        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
10082        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
10083        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
10084        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
10085        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
10086        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
10087        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
10088        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
10089        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
10090        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
10091        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
10092        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
10093        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
10094        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
10095        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
10096        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
10097        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
10098        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
10099        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
10100        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
10101        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
10102        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
10103        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
10104        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
10105        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
10106        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
10107        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
10108        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
10109        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
10110        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
10111        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
10112        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
10113        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
10114        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
10115        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
10116        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
10117        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
10118        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
10119        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
10120        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
10121        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
10122        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
10123        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
10124        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
10125        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
10126        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
10127        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
10128        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
10129        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
10130        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
10131    };
10132
10133    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
10134        if (!strcmp(core_device_commands[i].name, name))
10135            return core_device_commands[i].proc;
10136    }
10137
10138    return nullptr;
10139}
10140
10141static PFN_vkVoidFunction
10142intercept_khr_swapchain_command(const char *name, VkDevice dev) {
10143    static const struct {
10144        const char *name;
10145        PFN_vkVoidFunction proc;
10146    } khr_swapchain_commands[] = {
10147        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
10148        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
10149        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
10150        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
10151        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
10152    };
10153
10154    if (dev) {
10155        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10156        if (!dev_data->device_extensions.wsi_enabled)
10157            return nullptr;
10158    }
10159
10160    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
10161        if (!strcmp(khr_swapchain_commands[i].name, name))
10162            return khr_swapchain_commands[i].proc;
10163    }
10164
10165    return nullptr;
10166}
10167
10168} // namespace core_validation
10169
10170// vk_layer_logging.h expects these to be defined
10171
10172VKAPI_ATTR VkResult VKAPI_CALL
10173vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10174                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10175    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10176}
10177
10178VKAPI_ATTR void VKAPI_CALL
10179vkDestroyDebugReportCallbackEXT(VkInstance instance,
10180                                VkDebugReportCallbackEXT msgCallback,
10181                                const VkAllocationCallbacks *pAllocator) {
10182    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10183}
10184
10185VKAPI_ATTR void VKAPI_CALL
10186vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10187                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10188    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
10189}
10190
10191// loader-layer interface v0
10192
10193VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10194vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
10195    return util_GetExtensionProperties(1, core_validation::instance_extensions, pCount, pProperties);
10196}
10197
10198VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10199vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10200    return util_GetLayerProperties(1, &core_validation::global_layer, pCount, pProperties);
10201}
10202
10203VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10204vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
10205    return util_GetLayerProperties(1, &core_validation::global_layer, pCount, pProperties);
10206}
10207
10208VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10209                                                                                    const char *pLayerName, uint32_t *pCount,
10210                                                                                    VkExtensionProperties *pProperties) {
10211    // the layer command handles VK_NULL_HANDLE just fine
10212    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
10213}
10214
10215VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10216    return core_validation::GetDeviceProcAddr(dev, funcName);
10217}
10218
10219VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10220    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
10221        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateInstanceLayerProperties);
10222    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
10223        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateDeviceLayerProperties);
10224    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
10225        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateInstanceExtensionProperties);
10226    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
10227        return reinterpret_cast<PFN_vkVoidFunction>(vkGetInstanceProcAddr);
10228
10229    return core_validation::GetInstanceProcAddr(instance, funcName);
10230}
10231