core_validation.cpp revision f0d091deca3d4b90e1f31d75e062684dca160ad1
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <string>
#include <iostream>
#include <algorithm>
#include <list>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined(__ANDROID__)
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

#if MTMERGE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
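// Note: (VkDeviceMemory)(-1) is an all-bits-set sentinel value, assumed never to
// collide with a real memory handle returned by a driver.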
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;
struct render_pass;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGE
    // MTMERGE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    unordered_map<VkCommandBuffer, MT_CB_INFO> cbMap;
    // Merged w/ draw_state maps below
    //unordered_map<VkCommandPool, MT_CMD_POOL_INFO> commandPoolMap;
    //unordered_map<VkFence, MT_FENCE_INFO> fenceMap;
    //unordered_map<VkQueue, MT_QUEUE_INFO> queueMap;
    //unordered_map<VkSemaphore, MtSemaphoreState> semaphoreMap;
    //unordered_map<VkImageView, MT_IMAGE_VIEW_INFO> imageViewMap;
    //unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkFramebuffer, MT_FB_INFO> fbMap;
    unordered_map<VkRenderPass, MT_PASS_INFO> passMap;
    unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
    // MTMERGE - End of MT stuff
#endif
    devExts device_extensions;
    vector<VkQueue> queues; // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<void *, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, shader_module *> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
    // MTMERGE - added a couple of fields to the constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGE
          currentFenceId(1),
#endif
          device_extensions() {}
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
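
// Example (sketch): a layer ordering this check accepts, i.e. core_validation
// enabled before unique_objects in ppEnabledLayerNames:
//   const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};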

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
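
// Each SPIR-V instruction's first word packs the instruction's word count in
// its high 16 bits and the opcode in its low 16 bits; len() and opcode() above
// decode exactly that. Example (sketch) of walking a module:
//   for (auto insn = module->begin(); insn != module->end(); ++insn) {
//       if (insn.opcode() == spv::OpEntryPoint) { /* inspect insn.word(n) ... */ }
//   }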

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
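    /* A SPIR-V module starts with a 5-word header (magic, version, generator,
     * bound, schema), so the first instruction lives at word offset 5. */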
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
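
// Example (sketch): following a result <id> back to its defining instruction,
// e.g. to find the type of an OpVariable:
//   auto def = module->get_def(some_id);
//   if (def != module->end() && def.opcode() == spv::OpVariable) {
//       auto type = module->get_def(def.word(1)); /* word(1) is the result type <id> */
//   }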

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#define MAX_TID 513
static loader_platform_thread_id g_tidMapping[MAX_TID] = {0};
static uint32_t g_maxTID = 0;
#if MTMERGE
// MTMERGE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static VkBool32 clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

#if MTMERGE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Add new CBInfo for this cb to map container
static void add_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    my_data->cbMap[cb].commandBuffer = cb;
    my_data->commandPoolMap[commandPool].commandBuffers.push_front(cb);
}

// Delete CBInfo from container and clear mem references to CB
static VkBool32 delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    VkBool32 result = clear_cmd_buf_and_mem_references(my_data, cb);
    // Only delete the CBInfo if clearing its mem references did not flag an error
    if (result != VK_TRUE) {
        my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
        my_data->cbMap.erase(cb);
    }
    return result;
}

// Return ptr to Info in CB map, or NULL if not found
static MT_CB_INFO *get_cmd_buf_info(layer_data *my_data, const VkCommandBuffer cb) {
    auto item = my_data->cbMap.find(cb);
    if (item != my_data->cbMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swapchains are unique: use my_data->imageBindingMap, but copy in the
    // SwapchainCreateInfo's usage flags and set the mem value to a unique key. This is used by
    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage = static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo)->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Add a fence, creating one if necessary to our list of fences/fenceIds
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If a fence was provided, record it and validate that it is in the UNSIGNALED state
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}
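
// The fenceId scheme above is a device-wide, monotonically increasing counter:
// every submission takes the next id, and each queue records the largest id it
// has submitted (lastSubmittedId) and the largest id known to have completed
// (lastRetiredId). Any command buffer whose fenceId is <= its queue's
// lastRetiredId is therefore known to have finished execution.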

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}
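
// Worked example (hypothetical call): validating that a copy destination buffer
// was created with transfer-dst usage might look like
//   validate_buffer_usage_flags(my_data, disp_obj, dst_buffer,
//                               VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_TRUE,
//                               "vkCmdCopyBuffer", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
// With strict == VK_TRUE, a buffer created with only
// VK_BUFFER_USAGE_VERTEX_BUFFER_BIT would be flagged.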

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    DEVICE_MEM_INFO &mem_info = my_data->memObjMap[mem];
    memcpy(&mem_info.allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    mem_info.allocInfo.pNext = NULL;
    mem_info.object = object;
    mem_info.refCount = 0;
    mem_info.mem = mem;
    mem_info.image = VK_NULL_HANDLE;
    mem_info.memRange.offset = 0;
    mem_info.memRange.size = 0;
    mem_info.pData = 0;
    mem_info.pDriverData = 0;
    mem_info.valid = false;
}

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            // Search for cmd buffer object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pMemInfo->pCommandBufferBindings.size() > 0) {
                for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin();
                     it != pMemInfo->pCommandBufferBindings.end(); ++it) {
                    if ((*it) == cb) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                pMemInfo->pCommandBufferBindings.push_front(cb);
                pMemInfo->refCount++;
            }
            // Now update CBInfo's Mem reference list
            MT_CB_INFO *pCBInfo = get_cmd_buf_info(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBInfo) {
                // Search for memory object in cmd buffer's reference list
                VkBool32 found = VK_FALSE;
                if (pCBInfo->pMemObjList.size() > 0) {
                    for (auto it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
                        if ((*it) == mem) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    pCBInfo->pMemObjList.push_front(mem);
                }
            }
        }
    }
    return skipCall;
}

// Free bindings related to CB
static VkBool32 clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    VkBool32 skipCall = VK_FALSE;
    MT_CB_INFO *pCBInfo = get_cmd_buf_info(dev_data, cb);

    if (pCBInfo) {
        if (pCBInfo->pMemObjList.size() > 0) {
            list<VkDeviceMemory> mem_obj_list = pCBInfo->pMemObjList;
            for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, *it);
                if (pInfo) {
                    pInfo->pCommandBufferBindings.remove(cb);
                    pInfo->refCount--;
                }
            }
            pCBInfo->pMemObjList.clear();
        }
        pCBInfo->activeDescriptorSets.clear();
        pCBInfo->validate_functions.clear();
    }
    return skipCall;
}

// Delete the entire CB list
static VkBool32 delete_cmd_buf_info_list(layer_data *my_data) {
    VkBool32 skipCall = VK_FALSE;
    for (unordered_map<VkCommandBuffer, MT_CB_INFO>::iterator ii = my_data->cbMap.begin(); ii != my_data->cbMap.end(); ++ii) {
        skipCall |= clear_cmd_buf_and_mem_references(my_data, (*ii).first);
    }
    my_data->cbMap.clear();
    return skipCall;
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->pObjBindings.size();

    if (cmdBufRefCount != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
             it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
            // TODO : CommandBuffer should be source Obj here
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pCommandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    it->handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pObjBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    MT_CB_INFO *pCBInfo = get_cmd_buf_info(my_data, cb);
    bool skipCall = false;
    *complete = true;

    if (pCBInfo) {
        if (pCBInfo->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBInfo->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBInfo->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBInfo->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            bool commandBufferComplete = false;
            assert(pInfo->object != VK_NULL_HANDLE);
            list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
            list<VkCommandBuffer>::iterator temp;
            while (pInfo->pCommandBufferBindings.size() > 0 && it != pInfo->pCommandBufferBindings.end()) {
                skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
                if (commandBufferComplete) {
                    temp = it;
                    ++temp;
                    skipCall |= clear_cmd_buf_and_mem_references(dev_data, *it);
                    it = temp;
                } else {
                    ++it;
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (0 != pInfo->refCount) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs 3 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object from that
            // memory object's list, decrement the memObj's refcount, and set the object's memory
            // binding pointer to NULL.
            VkBool32 clearSucceeded = VK_FALSE;
            for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
                if ((it->handle == handle) && (it->type == type)) {
                    pMemObjInfo->refCount--;
                    pMemObjInfo->pObjBindings.erase(it);
                    clearSucceeded = VK_TRUE;
                    break;
                }
            }
            if (VK_FALSE == clearSucceeded) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
//  device is required for error logging, need a dispatchable
//  object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pMemInfo->pObjBindings.push_front(oht);
                    pMemInfo->refCount++;
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}
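
// Typical call (sketch; the real call sites are elsewhere in this file):
//   skipCall |= set_mem_binding(dev_data, device, mem, (uint64_t)buffer,
//                               VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");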

// For NULL mem case, clear any previous binding. Otherwise:
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns VK_FALSE if no errors were logged, VK_TRUE if the call should be skipped
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                // Search for object in memory object's binding list
                VkBool32 found = VK_FALSE;
                if (pInfo->pObjBindings.size() > 0) {
                    for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                        if (((*it).handle == handle) && ((*it).type == type)) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pInfo->pObjBindings.push_front(oht);
                    pInfo->refCount++;
                }
                // Need to set mem binding for this object
                pObjBindInfo->mem = mem;
            }
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: %u", pInfo->refCount);
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->pObjBindings.size());
        if (pInfo->pObjBindings.size() > 0) {
            for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, it->handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->pCommandBufferBindings.size());
        if (pInfo->pCommandBufferBindings.size() > 0) {
            for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
                 it != pInfo->pCommandBufferBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", (*it));
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    MT_CB_INFO *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)", my_data->cbMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->cbMap.empty())
        return;

    for (auto ii = my_data->cbMap.begin(); ii != my_data->cbMap.end(); ++ii) {
        pCBInfo = &(*ii).second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->pMemObjList.empty())
            continue;
        for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)(*it));
        }
    }
}

#endif

// Map actual TID to an index value and return that index
//  This keeps TIDs in range from 0-MAX_TID and simplifies compares between runs
static uint32_t getTIDIndex() {
    loader_platform_thread_id tid = loader_platform_get_thread_id();
    for (uint32_t i = 0; i < g_maxTID; i++) {
        if (tid == g_tidMapping[i])
            return i;
    }
    // Don't yet have a mapping: bounds-check, then set it and return the newly assigned index
    assert(g_maxTID < MAX_TID);
    uint32_t retVal = (uint32_t)g_maxTID;
    g_tidMapping[g_maxTID++] = tid;
    return retVal;
}
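
// Note: indices are handed out in first-seen order, so the first thread to call
// this gets index 0, the next new thread gets index 1, and so on, regardless of
// the underlying OS thread ids.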

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

1169// SPIRV utility functions
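// build_def_index records, for each SPIR-V result id, the offset of the instruction that
// defines it: e.g. given "%7 = OpTypeFloat 32", def_index[7] holds that instruction's
// offset, so a later src->get_def(7) can seek straight to the OpTypeFloat.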
1170static void build_def_index(shader_module *module) {
1171    for (auto insn : *module) {
1172        switch (insn.opcode()) {
1173        /* Types */
1174        case spv::OpTypeVoid:
1175        case spv::OpTypeBool:
1176        case spv::OpTypeInt:
1177        case spv::OpTypeFloat:
1178        case spv::OpTypeVector:
1179        case spv::OpTypeMatrix:
1180        case spv::OpTypeImage:
1181        case spv::OpTypeSampler:
1182        case spv::OpTypeSampledImage:
1183        case spv::OpTypeArray:
1184        case spv::OpTypeRuntimeArray:
1185        case spv::OpTypeStruct:
1186        case spv::OpTypeOpaque:
1187        case spv::OpTypePointer:
1188        case spv::OpTypeFunction:
1189        case spv::OpTypeEvent:
1190        case spv::OpTypeDeviceEvent:
1191        case spv::OpTypeReserveId:
1192        case spv::OpTypeQueue:
1193        case spv::OpTypePipe:
1194            module->def_index[insn.word(1)] = insn.offset();
1195            break;
1196
1197        /* Fixed constants */
1198        case spv::OpConstantTrue:
1199        case spv::OpConstantFalse:
1200        case spv::OpConstant:
1201        case spv::OpConstantComposite:
1202        case spv::OpConstantSampler:
1203        case spv::OpConstantNull:
1204            module->def_index[insn.word(2)] = insn.offset();
1205            break;
1206
1207        /* Specialization constants */
1208        case spv::OpSpecConstantTrue:
1209        case spv::OpSpecConstantFalse:
1210        case spv::OpSpecConstant:
1211        case spv::OpSpecConstantComposite:
1212        case spv::OpSpecConstantOp:
1213            module->def_index[insn.word(2)] = insn.offset();
1214            break;
1215
1216        /* Variables */
1217        case spv::OpVariable:
1218            module->def_index[insn.word(2)] = insn.offset();
1219            break;
1220
1221        /* Functions */
1222        case spv::OpFunction:
1223            module->def_index[insn.word(2)] = insn.offset();
1224            break;
1225
1226        default:
1227            /* We don't care about any other defs for now. */
1228            break;
1229        }
1230    }
1231}
1232
1233static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1234    for (auto insn : *src) {
1235        if (insn.opcode() == spv::OpEntryPoint) {
1236            auto entrypointName = (char const *)&insn.word(3);
1237            auto entrypointStageBits = 1u << insn.word(1);
1238
1239            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1240                return insn;
1241            }
1242        }
1243    }
1244
1245    return src->end();
1246}
1247
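/* The SPIR-V header is five words: the magic number (0x07230203), the version, a
 * generator magic, the id bound, and a reserved zero word. Checking the first two is a
 * cheap way to reject blobs that are not SPIR-V at all (e.g. GLSL source passed in by
 * mistake). */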
1248bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t const *words = (uint32_t const *)pCreateInfo->pCode;
1250    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
1251
1252    /* Just validate that the header makes sense. */
1253    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
1254}
1255
1256static char const *storage_class_name(unsigned sc) {
1257    switch (sc) {
1258    case spv::StorageClassInput:
1259        return "input";
1260    case spv::StorageClassOutput:
1261        return "output";
1262    case spv::StorageClassUniformConstant:
1263        return "const uniform";
1264    case spv::StorageClassUniform:
1265        return "uniform";
1266    case spv::StorageClassWorkgroup:
1267        return "workgroup local";
1268    case spv::StorageClassCrossWorkgroup:
1269        return "workgroup global";
1270    case spv::StorageClassPrivate:
1271        return "private global";
1272    case spv::StorageClassFunction:
1273        return "function";
1274    case spv::StorageClassGeneric:
1275        return "generic";
1276    case spv::StorageClassAtomicCounter:
1277        return "atomic counter";
1278    case spv::StorageClassImage:
1279        return "image";
1280    case spv::StorageClassPushConstant:
1281        return "push constant";
1282    default:
1283        return "unknown";
1284    }
1285}
1286
1287/* get the value of an integral constant */
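/* For a 32-bit scalar OpConstant the layout is: word(1) result type, word(2) result id,
 * word(3) the literal value -- e.g. "%n = OpConstant %uint 3" yields 3. */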
1288unsigned get_constant_value(shader_module const *src, unsigned id) {
1289    auto value = src->get_def(id);
1290    assert(value != src->end());
1291
1292    if (value.opcode() != spv::OpConstant) {
1293        /* TODO: Either ensure that the specialization transform is already performed on a module we're
1294            considering here, OR -- specialize on the fly now.
1295            */
1296        return 1;
1297    }
1298
1299    return value.word(3);
1300}
1301
1302/* returns ptr to null terminator */
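/* Produces a human-readable rendering for diagnostics, composed recursively --
 * e.g. a "vec4" fragment output appears as "ptr to output vec4 of float32". */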
1303static char *describe_type(char *dst, shader_module const *src, unsigned type) {
1304    auto insn = src->get_def(type);
1305    assert(insn != src->end());
1306
1307    switch (insn.opcode()) {
1308    case spv::OpTypeBool:
1309        return dst + sprintf(dst, "bool");
1310    case spv::OpTypeInt:
1311        return dst + sprintf(dst, "%cint%d", insn.word(3) ? 's' : 'u', insn.word(2));
1312    case spv::OpTypeFloat:
1313        return dst + sprintf(dst, "float%d", insn.word(2));
1314    case spv::OpTypeVector:
1315        dst += sprintf(dst, "vec%d of ", insn.word(3));
1316        return describe_type(dst, src, insn.word(2));
1317    case spv::OpTypeMatrix:
1318        dst += sprintf(dst, "mat%d of ", insn.word(3));
1319        return describe_type(dst, src, insn.word(2));
1320    case spv::OpTypeArray:
1321        dst += sprintf(dst, "arr[%d] of ", get_constant_value(src, insn.word(3)));
1322        return describe_type(dst, src, insn.word(2));
1323    case spv::OpTypePointer:
1324        dst += sprintf(dst, "ptr to %s ", storage_class_name(insn.word(2)));
1325        return describe_type(dst, src, insn.word(3));
1326    case spv::OpTypeStruct: {
1327        dst += sprintf(dst, "struct of (");
1328        for (unsigned i = 2; i < insn.len(); i++) {
1329            dst = describe_type(dst, src, insn.word(i));
1330            dst += sprintf(dst, i == insn.len() - 1 ? ")" : ", ");
1331        }
1332        return dst;
1333    }
1334    case spv::OpTypeSampler:
1335        return dst + sprintf(dst, "sampler");
1336    case spv::OpTypeSampledImage:
1337        dst += sprintf(dst, "sampler+");
1338        return describe_type(dst, src, insn.word(2));
1339    case spv::OpTypeImage:
1340        dst += sprintf(dst, "image(dim=%u, sampled=%u)", insn.word(3), insn.word(7));
1341        return dst;
1342    default:
1343        return dst + sprintf(dst, "oddtype");
1344    }
1345}
1346
1347static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
1348    /* walk two type trees together, and complain about differences */
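    /* b_arrayed handles stages whose inputs carry an extra per-vertex array level:
     * e.g. a geometry shader input "in vec4 foo[]" (b) matches a vertex shader
     * output "out vec4 foo" (a), because the outer OpTypeArray on b is peeled off
     * below before the element types are compared. */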
1349    auto a_insn = a->get_def(a_type);
1350    auto b_insn = b->get_def(b_type);
1351    assert(a_insn != a->end());
1352    assert(b_insn != b->end());
1353
1354    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1355        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1356        return types_match(a, b, a_type, b_insn.word(2), false);
1357    }
1358
1359    if (a_insn.opcode() != b_insn.opcode()) {
1360        return false;
1361    }
1362
1363    switch (a_insn.opcode()) {
1364    /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
1365    case spv::OpTypeBool:
        return !b_arrayed;
1367    case spv::OpTypeInt:
1368        /* match on width, signedness */
1369        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
1370    case spv::OpTypeFloat:
1371        /* match on width */
1372        return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
1373    case spv::OpTypeVector:
1374    case spv::OpTypeMatrix:
1375        /* match on element type, count. these all have the same layout. we don't get here if
1376         * b_arrayed -- that is handled above. */
1377        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
1378    case spv::OpTypeArray:
1379        /* match on element type, count. these all have the same layout. we don't get here if
1380         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1381         * not a literal within OpTypeArray */
1382        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
1383               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1384    case spv::OpTypeStruct:
1385        /* match on all element types */
1386        {
1387            if (b_arrayed) {
1388                /* for the purposes of matching different levels of arrayness, structs are leaves. */
1389                return false;
1390            }
1391
1392            if (a_insn.len() != b_insn.len()) {
1393                return false; /* structs cannot match if member counts differ */
1394            }
1395
1396            for (unsigned i = 2; i < a_insn.len(); i++) {
1397                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
1398                    return false;
1399                }
1400            }
1401
1402            return true;
1403        }
1404    case spv::OpTypePointer:
1405        /* match on pointee type. storage class is expected to differ */
1406        return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);
1407
1408    default:
1409        /* remaining types are CLisms, or may not appear in the interfaces we
1410         * are interested in. Just claim no match.
1411         */
1412        return false;
1413    }
1414}
1415
1416static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1417    auto it = map.find(id);
1418    if (it == map.end())
1419        return def;
1420    else
1421        return it->second;
1422}
1423
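/* Count the interface locations a type consumes: scalars and vectors take 1,
 * a matrix takes one location per column (e.g. mat4 -> 4), and an array takes
 * element-count times its element's locations (e.g. vec2[3] -> 3).
 * strip_array_level peels the outer per-vertex array on arrayed stage inputs. */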
1424static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1425    auto insn = src->get_def(type);
1426    assert(insn != src->end());
1427
1428    switch (insn.opcode()) {
1429    case spv::OpTypePointer:
1430        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1431         * we're never actually passing pointers around. */
1432        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1433    case spv::OpTypeArray:
1434        if (strip_array_level) {
1435            return get_locations_consumed_by_type(src, insn.word(2), false);
1436        } else {
1437            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1438        }
1439    case spv::OpTypeMatrix:
1440        /* num locations is the dimension * element size */
1441        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1442    default:
1443        /* everything else is just 1. */
1444        return 1;
1445
1446        /* TODO: extend to handle 64bit scalar types, whose vectors may need
1447         * multiple locations. */
1448    }
1449}
1450
1451typedef std::pair<unsigned, unsigned> location_t;
1452typedef std::pair<unsigned, unsigned> descriptor_slot_t;
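/* location_t is (location, component) within a stage interface;
 * descriptor_slot_t is (descriptor set, binding) within a pipeline layout. */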
1453
1454struct interface_var {
1455    uint32_t id;
1456    uint32_t type_id;
1457    uint32_t offset;
1458    /* TODO: collect the name, too? Isn't required to be present. */
1459};
1460
1461static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1462    while (true) {
1463
1464        if (def.opcode() == spv::OpTypePointer) {
1465            def = src->get_def(def.word(3));
1466        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1467            def = src->get_def(def.word(2));
1468            is_array_of_verts = false;
1469        } else if (def.opcode() == spv::OpTypeStruct) {
1470            return def;
1471        } else {
1472            return src->end();
1473        }
1474    }
1475}
1476
1477static void collect_interface_block_members(layer_data *my_data, VkDevice dev, shader_module const *src,
1478                                            std::map<location_t, interface_var> &out,
1479                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1480                                            uint32_t id, uint32_t type_id) {
1481    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1482    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
1483    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1484        /* this isn't an interface block. */
1485        return;
1486    }
1487
1488    std::unordered_map<unsigned, unsigned> member_components;
1489
1490    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1491    for (auto insn : *src) {
1492        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1493            unsigned member_index = insn.word(2);
1494
1495            if (insn.word(3) == spv::DecorationComponent) {
1496                unsigned component = insn.word(4);
1497                member_components[member_index] = component;
1498            }
1499        }
1500    }
1501
1502    /* Second pass -- produce the output, from Location decorations */
1503    for (auto insn : *src) {
1504        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1505            unsigned member_index = insn.word(2);
1506            unsigned member_type_id = type.word(2 + member_index);
1507
1508            if (insn.word(3) == spv::DecorationLocation) {
1509                unsigned location = insn.word(4);
1510                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1511                auto component_it = member_components.find(member_index);
1512                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1513
1514                for (unsigned int offset = 0; offset < num_locations; offset++) {
1515                    interface_var v;
1516                    v.id = id;
1517                    /* TODO: member index in interface_var too? */
1518                    v.type_id = member_type_id;
1519                    v.offset = offset;
1520                    out[std::make_pair(location + offset, component)] = v;
1521                }
1522            }
1523        }
1524    }
1525}
1526
1527static void collect_interface_by_location(layer_data *my_data, VkDevice dev, shader_module const *src, spirv_inst_iter entrypoint,
1528                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1529                                          bool is_array_of_verts) {
1530    std::unordered_map<unsigned, unsigned> var_locations;
1531    std::unordered_map<unsigned, unsigned> var_builtins;
1532    std::unordered_map<unsigned, unsigned> var_components;
1533    std::unordered_map<unsigned, unsigned> blocks;
1534
1535    for (auto insn : *src) {
1536
1537        /* We consider two interface models: SSO rendezvous-by-location, and
1538         * builtins. Complain about anything that fits neither model.
1539         */
1540        if (insn.opcode() == spv::OpDecorate) {
1541            if (insn.word(2) == spv::DecorationLocation) {
1542                var_locations[insn.word(1)] = insn.word(3);
1543            }
1544
1545            if (insn.word(2) == spv::DecorationBuiltIn) {
1546                var_builtins[insn.word(1)] = insn.word(3);
1547            }
1548
1549            if (insn.word(2) == spv::DecorationComponent) {
1550                var_components[insn.word(1)] = insn.word(3);
1551            }
1552
1553            if (insn.word(2) == spv::DecorationBlock) {
1554                blocks[insn.word(1)] = 1;
1555            }
1556        }
1557    }
1558
1559    /* TODO: handle grouped decorations */
1560    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we do NOT want to clobber. */
1562
1563    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1564       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1565       the word to determine which word contains the terminator. */
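    /* e.g. for OpEntryPoint Fragment %4 "main" %color, the name "main" packs into two
     * words -- 'm','a','i','n' then an all-zero pad word -- so the interface ids start
     * at word 5. */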
1566    auto word = 3;
1567    while (entrypoint.word(word) & 0xff000000u) {
1568        ++word;
1569    }
1570    ++word;
1571
1572    for (; word < entrypoint.len(); word++) {
1573        auto insn = src->get_def(entrypoint.word(word));
1574        assert(insn != src->end());
1575        assert(insn.opcode() == spv::OpVariable);
1576
1577        if (insn.word(3) == sinterface) {
1578            unsigned id = insn.word(2);
1579            unsigned type = insn.word(1);
1580
1581            int location = value_or_default(var_locations, id, -1);
1582            int builtin = value_or_default(var_builtins, id, -1);
1583            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1584
1585            /* All variables and interface block members in the Input or Output storage classes
1586             * must be decorated with either a builtin or an explicit location.
1587             *
1588             * TODO: integrate the interface block support here. For now, don't complain --
1589             * a valid SPIRV module will only hit this path for the interface block case, as the
1590             * individual members of the type are decorated, rather than variable declarations.
1591             */
1592
1593            if (location != -1) {
1594                /* A user-defined interface variable, with a location. Where a variable
1595                 * occupied multiple locations, emit one result for each. */
1596                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
1597                for (unsigned int offset = 0; offset < num_locations; offset++) {
1598                    interface_var v;
1599                    v.id = id;
1600                    v.type_id = type;
1601                    v.offset = offset;
1602                    out[std::make_pair(location + offset, component)] = v;
1603                }
1604            } else if (builtin == -1) {
1605                /* An interface block instance */
1606                collect_interface_block_members(my_data, dev, src, out, blocks, is_array_of_verts, id, type);
1607            }
1608        }
1609    }
1610}
1611
1612static void collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev, shader_module const *src,
1613                                                 std::unordered_set<uint32_t> const &accessible_ids,
1614                                                 std::map<descriptor_slot_t, interface_var> &out) {
1615
1616    std::unordered_map<unsigned, unsigned> var_sets;
1617    std::unordered_map<unsigned, unsigned> var_bindings;
1618
1619    for (auto insn : *src) {
1620        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1621         * DecorationDescriptorSet and DecorationBinding.
1622         */
1623        if (insn.opcode() == spv::OpDecorate) {
1624            if (insn.word(2) == spv::DecorationDescriptorSet) {
1625                var_sets[insn.word(1)] = insn.word(3);
1626            }
1627
1628            if (insn.word(2) == spv::DecorationBinding) {
1629                var_bindings[insn.word(1)] = insn.word(3);
1630            }
1631        }
1632    }
1633
1634    for (auto id : accessible_ids) {
1635        auto insn = src->get_def(id);
1636        assert(insn != src->end());
1637
1638        if (insn.opcode() == spv::OpVariable &&
1639            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1640            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1641            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1642
1643            auto existing_it = out.find(std::make_pair(set, binding));
1644            if (existing_it != out.end()) {
1645                /* conflict within spv image */
1646                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1647                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1648                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1649                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1650                        existing_it->first.second);
1651            }
1652
1653            interface_var v;
1654            v.id = insn.word(2);
1655            v.type_id = insn.word(1);
1656            out[std::make_pair(set, binding)] = v;
1657        }
1658    }
1659}
1660
1661static bool validate_interface_between_stages(layer_data *my_data, VkDevice dev, shader_module const *producer,
1662                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
1663                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1664                                              char const *consumer_name, bool consumer_arrayed_input) {
1665    std::map<location_t, interface_var> outputs;
1666    std::map<location_t, interface_var> inputs;
1667
1668    bool pass = true;
1669
1670    collect_interface_by_location(my_data, dev, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
1671    collect_interface_by_location(my_data, dev, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
1672                                  consumer_arrayed_input);
1673
1674    auto a_it = outputs.begin();
1675    auto b_it = inputs.begin();
1676
1677    /* maps sorted by key (location); walk them together to find mismatches */
1678    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1679        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1680        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1681        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1682        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1683
1684        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1685            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
1686                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1687                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
1688                        a_first.second, consumer_name)) {
1689                pass = false;
1690            }
1691            a_it++;
1692        } else if (a_at_end || a_first > b_first) {
1693            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1694                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1695                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
1696                        producer_name)) {
1697                pass = false;
1698            }
1699            b_it++;
1700        } else {
1701            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
1702                /* OK! */
1703            } else {
1704                char producer_type[1024];
1705                char consumer_type[1024];
1706                describe_type(producer_type, producer, a_it->second.type_id);
1707                describe_type(consumer_type, consumer, b_it->second.type_id);
1708
1709                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1710                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1711                            a_first.first, a_first.second, producer_type, consumer_type)) {
1712                    pass = false;
1713                }
1714            }
1715            a_it++;
1716            b_it++;
1717        }
1718    }
1719
1720    return pass;
1721}
1722
1723enum FORMAT_TYPE {
1724    FORMAT_TYPE_UNDEFINED,
1725    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1726    FORMAT_TYPE_SINT,
1727    FORMAT_TYPE_UINT,
1728};
1729
1730static unsigned get_format_type(VkFormat fmt) {
1731    switch (fmt) {
1732    case VK_FORMAT_UNDEFINED:
1733        return FORMAT_TYPE_UNDEFINED;
1734    case VK_FORMAT_R8_SINT:
1735    case VK_FORMAT_R8G8_SINT:
1736    case VK_FORMAT_R8G8B8_SINT:
1737    case VK_FORMAT_R8G8B8A8_SINT:
1738    case VK_FORMAT_R16_SINT:
1739    case VK_FORMAT_R16G16_SINT:
1740    case VK_FORMAT_R16G16B16_SINT:
1741    case VK_FORMAT_R16G16B16A16_SINT:
1742    case VK_FORMAT_R32_SINT:
1743    case VK_FORMAT_R32G32_SINT:
1744    case VK_FORMAT_R32G32B32_SINT:
1745    case VK_FORMAT_R32G32B32A32_SINT:
1746    case VK_FORMAT_B8G8R8_SINT:
1747    case VK_FORMAT_B8G8R8A8_SINT:
1748    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1749    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1750        return FORMAT_TYPE_SINT;
1751    case VK_FORMAT_R8_UINT:
1752    case VK_FORMAT_R8G8_UINT:
1753    case VK_FORMAT_R8G8B8_UINT:
1754    case VK_FORMAT_R8G8B8A8_UINT:
1755    case VK_FORMAT_R16_UINT:
1756    case VK_FORMAT_R16G16_UINT:
1757    case VK_FORMAT_R16G16B16_UINT:
1758    case VK_FORMAT_R16G16B16A16_UINT:
1759    case VK_FORMAT_R32_UINT:
1760    case VK_FORMAT_R32G32_UINT:
1761    case VK_FORMAT_R32G32B32_UINT:
1762    case VK_FORMAT_R32G32B32A32_UINT:
1763    case VK_FORMAT_B8G8R8_UINT:
1764    case VK_FORMAT_B8G8R8A8_UINT:
1765    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1766    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1767        return FORMAT_TYPE_UINT;
1768    default:
1769        return FORMAT_TYPE_FLOAT;
1770    }
1771}
1772
1773/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1774 * for comparison to a VkFormat's characterization above. */
1775static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1776    auto insn = src->get_def(type);
1777    assert(insn != src->end());
1778
1779    switch (insn.opcode()) {
1780    case spv::OpTypeInt:
1781        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1782    case spv::OpTypeFloat:
1783        return FORMAT_TYPE_FLOAT;
1784    case spv::OpTypeVector:
1785        return get_fundamental_type(src, insn.word(2));
1786    case spv::OpTypeMatrix:
1787        return get_fundamental_type(src, insn.word(2));
1788    case spv::OpTypeArray:
1789        return get_fundamental_type(src, insn.word(2));
1790    case spv::OpTypePointer:
1791        return get_fundamental_type(src, insn.word(3));
1792    default:
1793        return FORMAT_TYPE_UNDEFINED;
1794    }
1795}
1796
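/* Convert a single stage bit to a zero-based stage index: vertex -> 0 ... fragment -> 4.
 * e.g. VK_SHADER_STAGE_FRAGMENT_BIT (0x10): u_ffs returns 5, so the stage id is 4. */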
1797static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1798    uint32_t bit_pos = u_ffs(stage);
1799    return bit_pos - 1;
1800}
1801
1802static bool validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi) {
1803    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1804     * each binding should be specified only once.
1805     */
1806    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1807    bool pass = true;
1808
1809    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1810        auto desc = &vi->pVertexBindingDescriptions[i];
1811        auto &binding = bindings[desc->binding];
1812        if (binding) {
1813            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1814                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1815                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1816                pass = false;
1817            }
1818        } else {
1819            binding = desc;
1820        }
1821    }
1822
1823    return pass;
1824}
1825
1826static bool validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi,
1827                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1828    std::map<location_t, interface_var> inputs;
1829    bool pass = true;
1830
1831    collect_interface_by_location(my_data, dev, vs, entrypoint, spv::StorageClassInput, inputs, false);
1832
1833    /* Build index by location */
1834    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1835    if (vi) {
1836        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
1837            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
1838    }
1839
1840    auto it_a = attribs.begin();
1841    auto it_b = inputs.begin();
1842
1843    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1844        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1845        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1846        auto a_first = a_at_end ? 0 : it_a->first;
1847        auto b_first = b_at_end ? 0 : it_b->first.first;
1848        if (!a_at_end && (b_at_end || a_first < b_first)) {
1849            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
1850                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1851                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1852                pass = false;
1853            }
1854            it_a++;
1855        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1856            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1857                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
1858                        b_first)) {
1859                pass = false;
1860            }
1861            it_b++;
1862        } else {
1863            unsigned attrib_type = get_format_type(it_a->second->format);
1864            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1865
1866            /* type checking */
1867            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1868                char vs_type[1024];
1869                describe_type(vs_type, vs, it_b->second.type_id);
1870                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1871                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1872                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1873                            string_VkFormat(it_a->second->format), a_first, vs_type)) {
1874                    pass = false;
1875                }
1876            }
1877
1878            /* OK! */
1879            it_a++;
1880            it_b++;
1881        }
1882    }
1883
1884    return pass;
1885}
1886
1887static bool validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs,
1888                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1889    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
1890    std::map<location_t, interface_var> outputs;
1891    bool pass = true;
1892
1893    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1894
1895    collect_interface_by_location(my_data, dev, fs, entrypoint, spv::StorageClassOutput, outputs, false);
1896
1897    auto it = outputs.begin();
1898    uint32_t attachment = 0;
1899
1900    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
1901     * are currently dense, but the parallel with matching between shader stages is nice.
1902     */
1903
1904    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
1905        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
1906            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1907                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1908                        "FS writes to output location %d with no matching attachment", it->first.first)) {
1909                pass = false;
1910            }
1911            it++;
1912        } else if (it == outputs.end() || it->first.first > attachment) {
1913            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1914                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
1915                pass = false;
1916            }
1917            attachment++;
1918        } else {
1919            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
1920            unsigned att_type = get_format_type(color_formats[attachment]);
1921
1922            /* type checking */
1923            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1924                char fs_type[1024];
1925                describe_type(fs_type, fs, it->second.type_id);
1926                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1927                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1928                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
1929                            string_VkFormat(color_formats[attachment]), fs_type)) {
1930                    pass = false;
1931                }
1932            }
1933
1934            /* OK! */
1935            it++;
1936            attachment++;
1937        }
1938    }
1939
1940    return pass;
1941}
1942
1943/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1944 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1945 * for example.
1946 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1947 *  - NOT the shader input/output interfaces.
1948 *
1949 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1950 * converting parts of this to be generated from the machine-readable spec instead.
1951 */
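// The traversal below is a plain worklist walk over result ids: seed with the entrypoint's
// function id, and whenever an OpFunction is reached, scan its body and enqueue every id it
// loads, stores through, samples, or calls.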
1952static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1953    std::unordered_set<uint32_t> worklist;
1954    worklist.insert(entrypoint.word(2));
1955
1956    while (!worklist.empty()) {
1957        auto id_iter = worklist.begin();
1958        auto id = *id_iter;
1959        worklist.erase(id_iter);
1960
1961        auto insn = src->get_def(id);
1962        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1964             * across all kinds of things here that we may not care about. */
1965            continue;
1966        }
1967
1968        /* try to add to the output set */
1969        if (!ids.insert(id).second) {
1970            continue; /* if we already saw this id, we don't want to walk it again. */
1971        }
1972
1973        switch (insn.opcode()) {
1974        case spv::OpFunction:
1975            /* scan whole body of the function, enlisting anything interesting */
1976            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1977                switch (insn.opcode()) {
1978                case spv::OpLoad:
1979                case spv::OpAtomicLoad:
1980                case spv::OpAtomicExchange:
1981                case spv::OpAtomicCompareExchange:
1982                case spv::OpAtomicCompareExchangeWeak:
1983                case spv::OpAtomicIIncrement:
1984                case spv::OpAtomicIDecrement:
1985                case spv::OpAtomicIAdd:
1986                case spv::OpAtomicISub:
1987                case spv::OpAtomicSMin:
1988                case spv::OpAtomicUMin:
1989                case spv::OpAtomicSMax:
1990                case spv::OpAtomicUMax:
1991                case spv::OpAtomicAnd:
1992                case spv::OpAtomicOr:
1993                case spv::OpAtomicXor:
1994                    worklist.insert(insn.word(3)); /* ptr */
1995                    break;
1996                case spv::OpStore:
1997                case spv::OpAtomicStore:
1998                    worklist.insert(insn.word(1)); /* ptr */
1999                    break;
2000                case spv::OpAccessChain:
2001                case spv::OpInBoundsAccessChain:
2002                    worklist.insert(insn.word(3)); /* base ptr */
2003                    break;
2004                case spv::OpSampledImage:
2005                case spv::OpImageSampleImplicitLod:
2006                case spv::OpImageSampleExplicitLod:
2007                case spv::OpImageSampleDrefImplicitLod:
2008                case spv::OpImageSampleDrefExplicitLod:
2009                case spv::OpImageSampleProjImplicitLod:
2010                case spv::OpImageSampleProjExplicitLod:
2011                case spv::OpImageSampleProjDrefImplicitLod:
2012                case spv::OpImageSampleProjDrefExplicitLod:
2013                case spv::OpImageFetch:
2014                case spv::OpImageGather:
2015                case spv::OpImageDrefGather:
2016                case spv::OpImageRead:
2017                case spv::OpImage:
2018                case spv::OpImageQueryFormat:
2019                case spv::OpImageQueryOrder:
2020                case spv::OpImageQuerySizeLod:
2021                case spv::OpImageQuerySize:
2022                case spv::OpImageQueryLod:
2023                case spv::OpImageQueryLevels:
2024                case spv::OpImageQuerySamples:
2025                case spv::OpImageSparseSampleImplicitLod:
2026                case spv::OpImageSparseSampleExplicitLod:
2027                case spv::OpImageSparseSampleDrefImplicitLod:
2028                case spv::OpImageSparseSampleDrefExplicitLod:
2029                case spv::OpImageSparseSampleProjImplicitLod:
2030                case spv::OpImageSparseSampleProjExplicitLod:
2031                case spv::OpImageSparseSampleProjDrefImplicitLod:
2032                case spv::OpImageSparseSampleProjDrefExplicitLod:
2033                case spv::OpImageSparseFetch:
2034                case spv::OpImageSparseGather:
2035                case spv::OpImageSparseDrefGather:
2036                case spv::OpImageTexelPointer:
2037                    worklist.insert(insn.word(3)); /* image or sampled image */
2038                    break;
2039                case spv::OpImageWrite:
2040                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
2041                    break;
2042                case spv::OpFunctionCall:
2043                    for (auto i = 3; i < insn.len(); i++) {
2044                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2045                    }
2046                    break;
2047
2048                case spv::OpExtInst:
2049                    for (auto i = 5; i < insn.len(); i++) {
2050                        worklist.insert(insn.word(i)); /* operands to ext inst */
2051                    }
2052                    break;
2053                }
2054            }
2055            break;
2056        }
2057    }
2058}
2059
2060struct shader_stage_attributes {
2061    char const *const name;
2062    bool arrayed_input;
2063};
2064
2065static shader_stage_attributes shader_stage_attribs[] = {
2066    {"vertex shader", false},
2067    {"tessellation control shader", true},
2068    {"tessellation evaluation shader", false},
2069    {"geometry shader", true},
2070    {"fragment shader", false},
2071};
2072
2073static bool validate_push_constant_block_against_pipeline(layer_data *my_data, VkDevice dev,
2074                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
2075                                                          shader_module const *src, spirv_inst_iter type,
2076                                                          VkShaderStageFlagBits stage) {
2077    bool pass = true;
2078
2079    /* strip off ptrs etc */
2080    type = get_struct_type(src, type, false);
2081    assert(type != src->end());
2082
2083    /* validate directly off the offsets. this isn't quite correct for arrays
2084     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2085     * sizes */
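    /* e.g. a member decorated "Offset 16" passes only if some VkPushConstantRange has
     * range.offset <= 16 and range.offset + range.size >= 20 (assuming the 4-byte member
     * size used below), and that range's stageFlags include this stage. */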
2086    for (auto insn : *src) {
2087        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2088
2089            if (insn.word(3) == spv::DecorationOffset) {
2090                unsigned offset = insn.word(4);
2091                auto size = 4; /* bytes; TODO: calculate this based on the type */
2092
2093                bool found_range = false;
2094                for (auto const &range : *pushConstantRanges) {
2095                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2096                        found_range = true;
2097
2098                        if ((range.stageFlags & stage) == 0) {
2099                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2100                                        /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2101                                        "Push constant range covering variable starting at "
2102                                        "offset %u not accessible from stage %s",
2103                                        offset, string_VkShaderStageFlagBits(stage))) {
2104                                pass = false;
2105                            }
2106                        }
2107
2108                        break;
2109                    }
2110                }
2111
2112                if (!found_range) {
2113                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2114                                /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2115                                "Push constant range covering variable starting at "
2116                                "offset %u not declared in layout",
2117                                offset)) {
2118                        pass = false;
2119                    }
2120                }
2121            }
2122        }
2123    }
2124
2125    return pass;
2126}
2127
2128static bool validate_push_constant_usage(layer_data *my_data, VkDevice dev,
2129                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
2130                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2131    bool pass = true;
2132
2133    for (auto id : accessible_ids) {
2134        auto def_insn = src->get_def(id);
2135        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2136            pass = validate_push_constant_block_against_pipeline(my_data, dev, pushConstantRanges, src,
2137                                                                 src->get_def(def_insn.word(1)), stage) &&
2138                   pass;
2139        }
2140    }
2141
2142    return pass;
2143}
2144
2145// For given pipelineLayout verify that the setLayout at slot.first
2146//  has the requested binding at slot.second
2147static bool has_descriptor_binding(layer_data *my_data, vector<VkDescriptorSetLayout> *pipelineLayout, descriptor_slot_t slot,
2148                                   VkDescriptorType &type, VkShaderStageFlags &stage_flags) {
2149    type = VkDescriptorType(0);
2150    stage_flags = VkShaderStageFlags(0);
2151
2152    if (!pipelineLayout)
2153        return false;
2154
2155    if (slot.first >= pipelineLayout->size())
2156        return false;
2157
    // Guard against an unknown set layout; operator[] would insert a null node here
    auto layout_it = my_data->descriptorSetLayoutMap.find((*pipelineLayout)[slot.first]);
    if (layout_it == my_data->descriptorSetLayoutMap.end())
        return false;
    auto const layout_node = layout_it->second;

    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
2162        return false;
2163
2164    assert(bindingIt->second < layout_node->createInfo.bindingCount);
2165    VkDescriptorSetLayoutBinding binding = layout_node->createInfo.pBindings[bindingIt->second];
2166    type = binding.descriptorType;
2167    stage_flags = binding.stageFlags;
2168
2169    return true;
2170}
2171
2172// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2173
2174static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2175
// TODO : Should be tracking lastBound per commandBuffer and, when draws occur, report based on that cmd buffer's lastBound.
//   Accesses then need to be synchronized per cmd buffer so that state being read on one cmd buffer is not
//   changed from underneath us by updates to that same cmd buffer from a separate thread.
2179// Track the last cmd buffer touched by this thread
2180
2181// prototype
2182static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);
2183
2184static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2185    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2186        if (pCB->drawCount[i])
2187            return VK_TRUE;
2188    }
2189    return VK_FALSE;
2190}
2191
2192// Check object status for selected flag state
2193static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags enable_mask, CBStatusFlags status_mask,
2194                                CBStatusFlags status_flag, VkFlags msg_flags, DRAW_STATE_ERROR error_code, const char *fail_msg) {
2195    // If non-zero enable mask is present, check it against status but if enable_mask
2196    //  is 0 then no enable required so we should always just check status
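    //  e.g. the stencil checks in validate_draw_state_flags pass CBSTATUS_STENCIL_TEST_ENABLE
    //  as enable_mask, so the read/write-mask requirements only fire when stencil test is on.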
2197    if ((!enable_mask) || (enable_mask & pNode->status)) {
2198        if ((pNode->status & status_mask) != status_flag) {
2199            // TODO : How to pass dispatchable objects as srcObject? Here src obj should be cmd buffer
2200            return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, error_code,
2201                           "DS", "CB object %#" PRIxLEAST64 ": %s", (uint64_t)(pNode->commandBuffer), fail_msg);
2202        }
2203    }
2204    return VK_FALSE;
2205}
2206
2207// Retrieve pipeline node ptr for given pipeline object
2208static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
2209    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
2210        return NULL;
2211    }
2212    return my_data->pipelineMap[pipeline];
2213}
2214
2215// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
2216static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2217    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2218        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2219            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2220                return VK_TRUE;
2221        }
2222    }
2223    return VK_FALSE;
2224}
2225
2226// Validate state stored as flags at time of draw call
2227static VkBool32 validate_draw_state_flags(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
2228    VkBool32 result;
2229    result =
2230        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_VIEWPORT_SET, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2231                        DRAWSTATE_VIEWPORT_NOT_BOUND, "Dynamic viewport state not set for this command buffer");
2232    result |=
2233        validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_SCISSOR_SET, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2234                        DRAWSTATE_SCISSOR_NOT_BOUND, "Dynamic scissor state not set for this command buffer");
2235    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_LINE_WIDTH_SET, CBSTATUS_LINE_WIDTH_SET,
2236                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_LINE_WIDTH_NOT_BOUND,
2237                              "Dynamic line width state not set for this command buffer");
2238    result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_DEPTH_BIAS_SET, CBSTATUS_DEPTH_BIAS_SET,
2239                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BIAS_NOT_BOUND,
2240                              "Dynamic depth bias state not set for this command buffer");
2241    result |= validate_status(my_data, pCB, CBSTATUS_COLOR_BLEND_WRITE_ENABLE, CBSTATUS_BLEND_SET, CBSTATUS_BLEND_SET,
2242                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_BLEND_NOT_BOUND,
2243                              "Dynamic blend object state not set for this command buffer");
2244    result |= validate_status(my_data, pCB, CBSTATUS_DEPTH_WRITE_ENABLE, CBSTATUS_DEPTH_BOUNDS_SET, CBSTATUS_DEPTH_BOUNDS_SET,
2245                              VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND,
2246                              "Dynamic depth bounds state not set for this command buffer");
2247    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_READ_MASK_SET,
2248                              CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
2249                              "Dynamic stencil read mask state not set for this command buffer");
2250    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_WRITE_MASK_SET,
2251                              CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
2252                              "Dynamic stencil write mask state not set for this command buffer");
2253    result |= validate_status(my_data, pCB, CBSTATUS_STENCIL_TEST_ENABLE, CBSTATUS_STENCIL_REFERENCE_SET,
2254                              CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_STENCIL_NOT_BOUND,
2255                              "Dynamic stencil reference state not set for this command buffer");
2256    if (indexedDraw)
2257        result |= validate_status(my_data, pCB, CBSTATUS_NONE, CBSTATUS_INDEX_BUFFER_BOUND, CBSTATUS_INDEX_BUFFER_BOUND,
2258                                  VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2259                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2260    return result;
2261}
2262
2263// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and sample counts match.
2267//  If not, they are not compatible.
2268static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2269                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2270                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2271                                             const VkAttachmentDescription *pSecondaryAttachments) {
2272    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2273        if (VK_ATTACHMENT_UNUSED != pSecondary[index].attachment)
2274            return false;
2275    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2276        if (VK_ATTACHMENT_UNUSED != pPrimary[index].attachment)
2277            return false;
2278    } else { // format and sample count must match
2279        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2280             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2281            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2282             pSecondaryAttachments[pSecondary[index].attachment].samples))
2283            return true;
2284    }
2285    // Format and sample counts didn't match
2286    return false;
2287}
2288
// For given primary and secondary RenderPass objects, verify that they're compatible
2290static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2291                                            string &errorMsg) {
2292    stringstream errorStr;
2293    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2294        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2295        errorMsg = errorStr.str();
2296        return false;
2297    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2298        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2299        errorMsg = errorStr.str();
2300        return false;
2301    }
2302    // Trivial pass case is exact same RP
2303    if (primaryRP == secondaryRP) {
2304        return true;
2305    }
2306    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2307    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2308    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2309        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2310                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2311        errorMsg = errorStr.str();
2312        return false;
2313    }
2314    uint32_t spIndex = 0;
2315    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2316        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2317        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2318        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2319        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2320        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2321            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2322                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2323                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2324                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2325                errorMsg = errorStr.str();
2326                return false;
2327            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2328                                                         primaryColorCount, primaryRPCI->pAttachments,
2329                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2330                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2331                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2332                errorMsg = errorStr.str();
2333                return false;
2334            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2335                                                         primaryColorCount, primaryRPCI->pAttachments,
2336                                                         secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2337                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2338                errorStr << "depth/stencil attachments at index " << cIdx << " of subpass index " << spIndex
2339                         << " are not compatible.";
2340                errorMsg = errorStr.str();
2341                return false;
2342            }
2343        }
2344        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2345        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2346        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2347        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2351                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2352                errorMsg = errorStr.str();
2353                return false;
2354            }
2355        }
2356    }
2357    return true;
2358}
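
// Hypothetical usage sketch (caller names invented for illustration): code validating that a
// secondary command buffer's render pass is compatible with the primary's might do:
//
//     string errMsg;
//     if (!verify_renderpass_compatibility(dev_data, primaryCB_renderPass, secondaryCB_renderPass, errMsg)) {
//         // report errMsg through log_msg(...)
//     }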
2359
// For a given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
2361static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2362                                            const uint32_t layoutIndex, string &errorMsg) {
2363    stringstream errorStr;
2364    if (my_data->pipelineLayoutMap.find(layout) == my_data->pipelineLayoutMap.end()) {
2365        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2366        errorMsg = errorStr.str();
2367        return false;
2368    }
2369    PIPELINE_LAYOUT_NODE pl = my_data->pipelineLayoutMap[layout];
2370    if (layoutIndex >= pl.descriptorSetLayouts.size()) {
2371        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pl.descriptorSetLayouts.size()
2372                 << " setLayouts corresponding to sets 0-" << pl.descriptorSetLayouts.size() - 1
2373                 << ", but you're attempting to bind set to index " << layoutIndex;
2374        errorMsg = errorStr.str();
2375        return false;
2376    }
2377    // Get the specific setLayout from PipelineLayout that overlaps this set
2378    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pl.descriptorSetLayouts[layoutIndex]];
2379    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
2380        return true;
2381    }
2382    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
2383    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
2384        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
2385                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
2386                 << " descriptors.";
2387        errorMsg = errorStr.str();
2388        return false; // trivial fail case
2389    }
2390    // Now need to check set against corresponding pipelineLayout to verify compatibility
2391    for (size_t i = 0; i < descriptorCount; ++i) {
2392        // Need to verify that layouts are identically defined
2393        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
2394        //    do we also need to check immutable samplers?
2395        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
2396            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
2397                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
2398                     << "' but corresponding descriptor from pipelineLayout is type '"
2399                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
2400            errorMsg = errorStr.str();
2401            return false;
2402        }
2403        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
2404            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
2405                     << "' but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
2406            errorMsg = errorStr.str();
2407            return false;
2408        }
2409    }
2410    return true;
2411}
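
// Compatibility example (illustrative only): two set layouts pass the check above when they
// declare the same number of descriptors with identical types and stage flags per index,
// e.g. both built from the same binding description:
//
//     VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1,
//                                             VK_SHADER_STAGE_VERTEX_BIT, nullptr};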
2412
2413// Validate that data for each specialization entry is fully contained within the buffer.
2414static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2415    VkBool32 pass = VK_TRUE;
2416
2417    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2418
2419    if (spec) {
2420        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2421            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2422                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2423                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2424                            "Specialization entry %u (for constant id %u) references memory outside provided "
2425                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2426                            " bytes provided)",
2427                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2428                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2429
2430                    pass = VK_FALSE;
2431                }
2432            }
2433        }
2434    }
2435
2436    return pass;
2437}
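
// Example of input this check rejects (illustrative only): a map entry whose offset + size
// runs past dataSize:
//
//     int32_t data = 1;
//     VkSpecializationMapEntry entry = {0 /*constantID*/, 2 /*offset*/, sizeof(int32_t) /*size*/};
//     VkSpecializationInfo spec = {1, &entry, sizeof(data), &data};
//     // offset (2) + size (4) = 6 > dataSize (4) --> SHADER_CHECKER_BAD_SPECIALIZATION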
2438
2439static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
2440                                  VkDescriptorType descriptor_type) {
2441    auto type = module->get_def(type_id);
2442
2443    /* Strip off any array or ptrs */
2444    /* TODO: if we see an array type here, we should make use of it in order to
2445     * validate the number of descriptors actually required to be set in the
2446     * API.
2447     */
2448    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2449        type = module->get_def(type.word(type.opcode() == spv::OpTypeArray ? 2 : 3));
2450    }
2451
2452    switch (type.opcode()) {
2453    case spv::OpTypeStruct: {
2454        for (auto insn : *module) {
2455            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2456                if (insn.word(2) == spv::DecorationBlock) {
2457                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2458                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2459                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2460                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2461                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2462                }
2463            }
2464        }
2465
2466        /* Invalid */
2467        return false;
2468    }
2469
2470    case spv::OpTypeSampler:
2471        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2472
2473    case spv::OpTypeSampledImage:
2474        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2475
2476    case spv::OpTypeImage: {
        /* Many descriptor types can back an image type -- which one depends on the
         * image dimension and on whether the image will be used with a sampler.
         * SPIR-V for Vulkan requires Sampled to be 1 or 2; leaving the decision to
         * runtime is not allowed.
         */
2482        auto dim = type.word(3);
2483        auto sampled = type.word(7);
2484
2485        if (dim == spv::DimSubpassData) {
2486            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2487        } else if (dim == spv::DimBuffer) {
2488            if (sampled == 1) {
2489                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2490            } else {
2491                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2492            }
2493        } else if (sampled == 1) {
2494            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2495        } else {
2496            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2497        }
2498    }
2499
2500    /* We shouldn't really see any other junk types -- but if we do, they're
2501     * a mismatch.
2502     */
2503    default:
2504        return false; /* Mismatch */
2505    }
2506}
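
// For reference, the SPIR-V cases above correspond roughly to these declarations
// (illustrative, assuming Vulkan-flavored GLSL):
//
//     layout(set=0, binding=0) uniform UBO  { ... };        // OpTypeStruct + Block       -> UNIFORM_BUFFER(_DYNAMIC)
//     layout(set=0, binding=1) buffer  SSBO { ... };        // OpTypeStruct + BufferBlock -> STORAGE_BUFFER(_DYNAMIC)
//     layout(set=0, binding=2) uniform sampler2D tex;       // OpTypeSampledImage         -> COMBINED_IMAGE_SAMPLER
//     layout(set=0, binding=3) uniform texture2D img;       // OpTypeImage, Sampled=1     -> SAMPLED_IMAGE
//     layout(set=0, binding=4, rgba8) uniform image2D st;   // OpTypeImage, Sampled=2     -> STORAGE_IMAGE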
2507
2508static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2509    if (!feature) {
2510        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2511                    /* dev */ 0, __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2512                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2513                    "enabled on the device",
2514                    feature_name)) {
2515            return false;
2516        }
2517    }
2518
2519    return true;
2520}
2521
static VkBool32 validate_shader_capabilities(layer_data *my_data, VkDevice dev, shader_module const *src) {
2524    VkBool32 pass = VK_TRUE;
2525
2526    auto enabledFeatures = &my_data->physDevProperties.features;
2527
2528    for (auto insn : *src) {
2529        if (insn.opcode() == spv::OpCapability) {
2530            switch (insn.word(1)) {
2531            case spv::CapabilityMatrix:
2532            case spv::CapabilityShader:
2533            case spv::CapabilityInputAttachment:
2534            case spv::CapabilitySampled1D:
2535            case spv::CapabilityImage1D:
2536            case spv::CapabilitySampledBuffer:
2537            case spv::CapabilityImageBuffer:
2538            case spv::CapabilityImageQuery:
2539            case spv::CapabilityDerivativeControl:
2540                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2541                break;
2542
2543            case spv::CapabilityGeometry:
2544                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
2545                break;
2546
2547            case spv::CapabilityTessellation:
2548                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
2549                break;
2550
2551            case spv::CapabilityFloat64:
2552                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2553                break;
2554
2555            case spv::CapabilityInt64:
2556                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
2557                break;
2558
2559            case spv::CapabilityTessellationPointSize:
2560            case spv::CapabilityGeometryPointSize:
2561                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2562                                        "shaderTessellationAndGeometryPointSize");
2563                break;
2564
2565            case spv::CapabilityImageGatherExtended:
2566                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2567                break;
2568
2569            case spv::CapabilityStorageImageMultisample:
2570                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2571                break;
2572
2573            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2574                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2575                                        "shaderUniformBufferArrayDynamicIndexing");
2576                break;
2577
2578            case spv::CapabilitySampledImageArrayDynamicIndexing:
2579                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2580                                        "shaderSampledImageArrayDynamicIndexing");
2581                break;
2582
2583            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2584                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2585                                        "shaderStorageBufferArrayDynamicIndexing");
2586                break;
2587
2588            case spv::CapabilityStorageImageArrayDynamicIndexing:
2589                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2590                                        "shaderStorageImageArrayDynamicIndexing");
2591                break;
2592
2593            case spv::CapabilityClipDistance:
2594                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2595                break;
2596
2597            case spv::CapabilityCullDistance:
2598                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2599                break;
2600
2601            case spv::CapabilityImageCubeArray:
2602                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2603                break;
2604
2605            case spv::CapabilitySampleRateShading:
2606                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2607                break;
2608
2609            case spv::CapabilitySparseResidency:
2610                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2611                break;
2612
2613            case spv::CapabilityMinLod:
2614                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2615                break;
2616
2617            case spv::CapabilitySampledCubeArray:
2618                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2619                break;
2620
2621            case spv::CapabilityImageMSArray:
2622                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2623                break;
2624
2625            case spv::CapabilityStorageImageExtendedFormats:
2626                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
2627                                        "shaderStorageImageExtendedFormats");
2628                break;
2629
2630            case spv::CapabilityInterpolationFunction:
2631                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2632                break;
2633
2634            case spv::CapabilityStorageImageReadWithoutFormat:
2635                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2636                                        "shaderStorageImageReadWithoutFormat");
2637                break;
2638
2639            case spv::CapabilityStorageImageWriteWithoutFormat:
2640                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2641                                        "shaderStorageImageWriteWithoutFormat");
2642                break;
2643
2644            case spv::CapabilityMultiViewport:
2645                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
2646                break;
2647
2648            default:
2649                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /* dev */0,
2650                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2651                            "Shader declares capability %u, not supported in Vulkan.",
2652                            insn.word(1)))
2653                    pass = VK_FALSE;
2654                break;
2655            }
2656        }
2657    }
2658
2659    return pass;
2660}
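
// Example trigger (illustrative): a shader declaring a double-precision variable, e.g.
//
//     double d = 1.0lf;
//
// compiles to SPIR-V containing "OpCapability Float64" and fails this check unless
// VkPhysicalDeviceFeatures::shaderFloat64 was enabled at device-creation time.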
2661
2662
// Validate the shaders used by the given pipeline.
//  As a side effect, this function also records the sets that are actually used by the pipeline
2665static VkBool32 validate_pipeline_shaders(layer_data *my_data, VkDevice dev, PIPELINE_NODE *pPipeline) {
2666    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
2667    /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
2668     * before trying to do anything more: */
2669    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2670    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2671
    shader_module *shaders[5] = {};      // one slot per graphics shader stage
    spirv_inst_iter entrypoints[5] = {}; // value-initialize; memset is not safe on a non-trivial iterator type
2676    RENDER_PASS_NODE const *rp = 0;
2677    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2678    VkBool32 pass = VK_TRUE;
2679
2680    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2681        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
2682        if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {
2683
2684            if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
2685                                  VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
2686                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2687                            /*dev*/ 0, __LINE__, SHADER_CHECKER_UNKNOWN_STAGE, "SC", "Unknown shader stage %d", pStage->stage)) {
2688                    pass = VK_FALSE;
2689                }
2690            } else {
2691                pass = validate_specialization_offsets(my_data, pStage) && pass;
2692
2693                auto stage_id = get_shader_stage_id(pStage->stage);
2694                shader_module *module = my_data->shaderModuleMap[pStage->module];
2695                shaders[stage_id] = module;
2696
2697                /* find the entrypoint */
2698                entrypoints[stage_id] = find_entrypoint(module, pStage->pName, pStage->stage);
2699                if (entrypoints[stage_id] == module->end()) {
2700                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2701                                /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2702                                "No entrypoint found named `%s` for stage %s", pStage->pName,
2703                                string_VkShaderStageFlagBits(pStage->stage))) {
2704                        pass = VK_FALSE;
2705                    }
2706                }
2707
2708                /* validate shader capabilities against enabled device features */
2709                pass = validate_shader_capabilities(my_data, dev, module) && pass;
2710
2711                /* mark accessible ids */
2712                std::unordered_set<uint32_t> accessible_ids;
2713                mark_accessible_ids(module, entrypoints[stage_id], accessible_ids);
2714
2715                /* validate descriptor set layout against what the entrypoint actually uses */
2716                std::map<descriptor_slot_t, interface_var> descriptor_uses;
2717                collect_interface_by_descriptor_slot(my_data, dev, module, accessible_ids, descriptor_uses);
2718
2719                auto layouts = pCreateInfo->layout != VK_NULL_HANDLE
2720                                   ? &(my_data->pipelineLayoutMap[pCreateInfo->layout].descriptorSetLayouts)
2721                                   : nullptr;
2722
2723                for (auto it = descriptor_uses.begin(); it != descriptor_uses.end(); it++) {
2724                    // As a side-effect of this function, capture which sets are used by the pipeline
2725                    pPipeline->active_sets.insert(it->first.first);
2726
2727                    /* find the matching binding */
2728                    VkDescriptorType descriptor_type;
2729                    VkShaderStageFlags descriptor_stage_flags;
2730                    auto found = has_descriptor_binding(my_data, layouts, it->first, descriptor_type, descriptor_stage_flags);
2731
2732                    if (!found) {
2733                        char type_name[1024];
2734                        describe_type(type_name, module, it->second.type_id);
2735                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2736                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2737                                    "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2738                                    it->first.first, it->first.second, type_name)) {
2739                            pass = VK_FALSE;
2740                        }
2741                    } else if (~descriptor_stage_flags & pStage->stage) {
2742                        char type_name[1024];
2743                        describe_type(type_name, module, it->second.type_id);
2744                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2745                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2746                                    "Shader uses descriptor slot %u.%u (used "
2747                                    "as type `%s`) but descriptor not "
2748                                    "accessible from stage %s",
2749                                    it->first.first, it->first.second, type_name, string_VkShaderStageFlagBits(pStage->stage))) {
2750                            pass = VK_FALSE;
2751                        }
2752                    } else if (!descriptor_type_match(my_data, module, it->second.type_id, descriptor_type)) {
2753                        char type_name[1024];
2754                        describe_type(type_name, module, it->second.type_id);
2755                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2756                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2757                                    "Type mismatch on descriptor slot "
2758                                    "%u.%u (used as type `%s`) but "
2759                                    "descriptor of type %s",
2760                                    it->first.first, it->first.second, type_name, string_VkDescriptorType(descriptor_type))) {
2761                            pass = VK_FALSE;
2762                        }
2763                    }
2764                }
2765
2766                /* validate push constant usage */
2767                pass =
2768                    validate_push_constant_usage(my_data, dev, &my_data->pipelineLayoutMap[pCreateInfo->layout].pushConstantRanges,
2769                                                 module, accessible_ids, pStage->stage) &&
2770                    pass;
2771            }
2772        }
2773    }
2774
2775    if (pCreateInfo->renderPass != VK_NULL_HANDLE)
2776        rp = my_data->renderPassMap[pCreateInfo->renderPass];
2777
2778    vi = pCreateInfo->pVertexInputState;
2779
2780    if (vi) {
2781        pass = validate_vi_consistency(my_data, dev, vi) && pass;
2782    }
2783
2784    if (shaders[vertex_stage]) {
2785        pass = validate_vi_against_vs_inputs(my_data, dev, vi, shaders[vertex_stage], entrypoints[vertex_stage]) && pass;
2786    }
2787
2788    /* TODO: enforce rules about present combinations of shaders */
2789    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2790    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2791
2792    while (!shaders[producer] && producer != fragment_stage) {
2793        producer++;
2794        consumer++;
2795    }
2796
2797    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2798        assert(shaders[producer]);
2799        if (shaders[consumer]) {
2800            pass = validate_interface_between_stages(my_data, dev, shaders[producer], entrypoints[producer],
2801                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
2802                                                     shader_stage_attribs[consumer].name,
2803                                                     shader_stage_attribs[consumer].arrayed_input) &&
2804                   pass;
2805
2806            producer = consumer;
2807        }
2808    }
2809
2810    if (shaders[fragment_stage] && rp) {
2811        pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], entrypoints[fragment_stage], rp,
2812                                                       pCreateInfo->subpass) &&
2813               pass;
2814    }
2815
2816    return pass;
2817}
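
// Note on the producer/consumer walk above: it validates each adjacent pair of present stages,
// so a VS+GS+FS pipeline is checked as VS->GS then GS->FS, while a VS+FS pipeline is checked
// as VS->FS directly (missing intermediate stages are skipped).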
2818
2819// Return Set node ptr for specified set or else NULL
2820static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2821    if (my_data->setMap.find(set) == my_data->setMap.end()) {
2822        return NULL;
2823    }
2824    return my_data->setMap[set];
2825}
// For the given command buffer, verify that for each set in activeSetNodes
//  any dynamic descriptor in that set has a valid dynamic offset bound.
//  To be valid, the dynamic offset combined with the offset and range from its
//  descriptor update must not overflow the size of the buffer being updated
static VkBool32 validate_dynamic_offsets(layer_data *my_data, const GLOBAL_CB_NODE *pCB,
                                         const vector<SET_NODE *> &activeSetNodes) {
2831    VkBool32 result = VK_FALSE;
2832
2833    VkWriteDescriptorSet *pWDS = NULL;
2834    uint32_t dynOffsetIndex = 0;
2835    VkDeviceSize bufferSize = 0;
2836    for (auto set_node : activeSetNodes) {
2837        for (uint32_t i = 0; i < set_node->descriptorCount; ++i) {
2838            // TODO: Add validation for descriptors dynamically skipped in shader
2839            if (set_node->ppDescriptors[i] != NULL) {
2840                switch (set_node->ppDescriptors[i]->sType) {
2841                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
2842                    pWDS = (VkWriteDescriptorSet *)set_node->ppDescriptors[i];
2843                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2844                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2845                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2846                            bufferSize = my_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
2847                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
2848                                if ((pCB->dynamicOffsets[dynOffsetIndex] + pWDS->pBufferInfo[j].offset) > bufferSize) {
2849                                    result |=
2850                                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2851                                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)set_node->set, __LINE__,
2852                                                DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2853                                                "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of "
2854                                                "VK_WHOLE_SIZE but dynamic offset %u "
2855                                                "combined with offet %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
2856                                                ") which has a size of %#" PRIxLEAST64 ".",
2857                                                (uint64_t)set_node->set, i, pCB->dynamicOffsets[dynOffsetIndex],
2858                                                pWDS->pBufferInfo[j].offset, (uint64_t)pWDS->pBufferInfo[j].buffer, bufferSize);
2859                                }
2860                            } else if ((pCB->dynamicOffsets[dynOffsetIndex] + pWDS->pBufferInfo[j].offset +
2861                                        pWDS->pBufferInfo[j].range) > bufferSize) {
2862                                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2863                                                  VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)set_node->set, __LINE__,
2864                                                  DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2865                                                  "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %u. "
2866                                                  "Combined with offet %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2867                                                  " from its update, this oversteps its buffer "
2868                                                  "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2869                                                  (uint64_t)set_node->set, i, pCB->dynamicOffsets[dynOffsetIndex],
2870                                                  pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2871                                                  (uint64_t)pWDS->pBufferInfo[j].buffer, bufferSize);
2884                            }
                            dynOffsetIndex++;
                        }
                        // Advance i to the last descriptor consumed by this update (the ++i at the
                        // end of the enclosing loop then moves one index past it)
                        i += pWDS->descriptorCount - 1;
2889                    }
2890                    break;
2891                default: // Currently only shadowing Write update nodes so shouldn't get here
2892                    assert(0);
2893                    continue;
2894                }
2895            }
2896        }
2897    }
2898    return result;
2899}
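
// Worked example (illustrative): for a 256-byte buffer updated with offset=64 and range=128,
// a dynamic offset of 64 gives 64 + 64 + 128 = 256 <= 256 and passes, while a dynamic offset
// of 65 gives 257 > 256 and triggers DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW.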
2900
2901// Validate overall state at the time of a draw call
2902static VkBool32 validate_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
2903    // First check flag states
2904    VkBool32 result = validate_draw_state_flags(my_data, pCB, indexedDraw);
2905    PIPELINE_NODE *pPipe = getPipeline(my_data, pCB->lastBoundPipeline);
2906    // Now complete other state checks
2907    // TODO : Currently only performing next check if *something* was bound (non-zero last bound)
2908    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound
2909    //  We should have that check separately and then gate this check based on that check
2910    if (pPipe) {
2911        if (pCB->lastBoundPipelineLayout) {
2912            string errorString;
2913            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2914            vector<SET_NODE *> activeSetNodes;
2915            for (auto setIndex : pPipe->active_sets) {
2916                // If valid set is not bound throw an error
2917                if ((pCB->boundDescriptorSets.size() <= setIndex) || (!pCB->boundDescriptorSets[setIndex])) {
2918                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2919                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2920                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
2921                                      (uint64_t)pPipe->pipeline, setIndex);
2922                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[pCB->boundDescriptorSets[setIndex]],
2923                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2924                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2925                    VkDescriptorSet setHandle = my_data->setMap[pCB->boundDescriptorSets[setIndex]]->set;
2926                    result |= log_msg(
2927                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2928                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2929                        "VkDescriptorSet (%#" PRIxLEAST64
2930                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
2931                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
2932                } else { // Valid set is bound and layout compatible, validate that it's updated and verify any dynamic offsets
2933                    // Pull the set node
2934                    SET_NODE *pSet = my_data->setMap[pCB->boundDescriptorSets[setIndex]];
2935                    // Save vector of all active sets to verify dynamicOffsets below
2936                    activeSetNodes.push_back(pSet);
2937                    // Make sure set has been updated
2938                    if (!pSet->pUpdateStructs) {
2939                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2940                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
2941                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2942                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2943                                                              "this will result in undefined behavior.",
2944                                          (uint64_t)pSet->set);
2945                    }
2946                }
2947            }
2948            // For each dynamic descriptor, make sure dynamic offset doesn't overstep buffer
2949            if (!pCB->dynamicOffsets.empty())
2950                result |= validate_dynamic_offsets(my_data, pCB, activeSetNodes);
2951        }
2952        // Verify Vtx binding
2953        if (pPipe->vtxBindingCount > 0) {
2954            VkPipelineVertexInputStateCreateInfo *vtxInCI = &pPipe->vertexInputCI;
2955            for (uint32_t i = 0; i < vtxInCI->vertexBindingDescriptionCount; i++) {
2956                if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2957                    result |= log_msg(
2958                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2959                        DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2960                        "The Pipeline State Object (%#" PRIxLEAST64
2961                        ") expects that this Command Buffer's vertex binding Index %d should be set via vkCmdBindVertexBuffers.",
2962                        (uint64_t)pCB->lastBoundPipeline, i);
2963                }
2964            }
2965        } else {
2966            if (!pCB->currentDrawData.buffers.empty()) {
2967                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2968                                  0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2969                                  "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
2970                                  ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
2971                                  (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBoundPipeline);
2972            }
2973        }
2974        // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2975        // Skip check if rasterization is disabled or there is no viewport.
2976        if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
2977             !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
2978            pPipe->graphicsPipelineCI.pViewportState) {
2979            VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
2980            VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
2981            if (dynViewport) {
2982                if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
2983                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2984                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2985                                      "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2986                                      ", but PSO viewportCount is %u. These counts must match.",
2987                                      pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
2988                }
2989            }
2990            if (dynScissor) {
2991                if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
2992                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2993                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2994                                      "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2995                                      ", but PSO scissorCount is %u. These counts must match.",
2996                                      pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
2997                }
2998            }
2999        }
3000    }
3001    return result;
3002}
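
// Sketch of the command sequence whose state this validates (illustrative only):
//
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, 1, &set, 1, &dynOffset);
//     vkCmdBindVertexBuffers(cb, 0, 1, &vertexBuffer, &vbOffset);
//     vkCmdSetViewport(cb, 0, viewportCount, viewports);   // dynamic counts must match the PSO
//     vkCmdSetScissor(cb, 0, scissorCount, scissors);
//     vkCmdDraw(cb, vertexCount, 1, 0, 0);                 // validate_draw_state() runs here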
3003
3004// Verify that create state for a pipeline is valid
3005static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3006                                          int pipelineIndex) {
3007    VkBool32 skipCall = VK_FALSE;
3008
3009    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3010
3011    // If create derivative bit is set, check that we've specified a base
3012    // pipeline correctly, and that the base pipeline was created to allow
3013    // derivatives.
3014    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3015        PIPELINE_NODE *pBasePipeline = nullptr;
3016        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3017              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3018            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3019                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3020                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3021        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3022            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3023                skipCall |=
3024                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3025                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3026                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3027            } else {
3028                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3029            }
3030        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3031            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3032        }
3033
3034        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3035            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3036                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3037                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3038        }
3039    }
3040
3041    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3042        if (!my_data->physDevProperties.features.independentBlend) {
3043            VkPipelineColorBlendAttachmentState *pAttachments = pPipeline->pAttachments;
3044            for (uint32_t i = 1; i < pPipeline->attachmentCount; i++) {
3045                if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3046                    (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3047                    (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3048                    (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3049                    (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3050                    (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3051                    (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3052                    (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3053                    skipCall |=
3054                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3055                                DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
3056                                                                   "enabled, all elements of pAttachments must be identical");
3057                }
3058            }
3059        }
3060        if (!my_data->physDevProperties.features.logicOp &&
3061            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3062            skipCall |=
3063                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3064                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3065                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3066        }
3067        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3068            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3069             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3070            skipCall |=
3071                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3072                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3073                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3074        }
3075    }
3076
3077    // Ensure the subpass index is valid. If not, then validate_pipeline_shaders
3078    // produces nonsense errors that confuse users. Other layers should already
3079    // emit errors for renderpass being invalid.
3080    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
3081    if (rp_data != my_data->renderPassMap.end() &&
3082        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
3083        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3084                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3085                                                                           "is out of range for this renderpass (0..%u)",
3086                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
3087    }
3088
3089    if (!validate_pipeline_shaders(my_data, device, pPipeline)) {
3090        skipCall = VK_TRUE;
3091    }
3092    // VS is required
3093    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3094        skipCall |=
3095            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3096                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3097    }
3098    // Either both or neither TC/TE shaders should be defined
3099    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3100        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3101        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3102                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3103                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3104    }
3105    // Compute shaders should be specified independent of Gfx shaders
3106    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3107        (pPipeline->active_shaders &
3108         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3109          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3110        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3111                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3112                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3113    }
3114    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3115    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3116    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3117        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3118        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3119                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3120                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3121                                                                           "topology for tessellation pipelines");
3122    }
3123    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3124        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3125            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3126                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3127                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3128                                                                               "topology is only valid for tessellation pipelines");
3129        }
3130        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
3131            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3132                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3133                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3134                                                                               "topology used with patchControlPoints value %u."
3135                                                                               " patchControlPoints should be >0 and <=32.",
3136                                pPipeline->tessStateCI.patchControlPoints);
3137        }
3138    }
3139    // Viewport state must be included if rasterization is enabled.
3140    // If the viewport state is included, the viewport and scissor counts should always match.
3141    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3142    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3143        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3144        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3145            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                           "and scissors are dynamic, the PSO must include "
                                                                           "viewportCount and scissorCount in pViewportState.");
3149        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3150                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3151            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3152                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3153                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3155        } else {
3156            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3157            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3158            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3159            if (!dynViewport) {
3160                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3161                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3162                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3163                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3164                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3165                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3166                                        "vkCmdSetViewport().",
3167                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3168                }
3169            }
3170            if (!dynScissor) {
3171                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3172                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3173                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3174                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3175                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3176                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3177                                        "vkCmdSetScissor().",
3178                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3179                }
3180            }
3181        }
3182    }
3183    return skipCall;
3184}
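
// Example of the derivative-pipeline rule enforced above (illustrative only): when creating two
// pipelines in a single vkCreateGraphicsPipelines call, the base must come earlier in the array
// and must allow derivatives:
//
//     VkGraphicsPipelineCreateInfo ci[2] = {};    // ci[0] = base, ci[1] = derivative
//     ci[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
//     ci[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     ci[1].basePipelineIndex  = 0;               // must be earlier in the array
//     ci[1].basePipelineHandle = VK_NULL_HANDLE;  // exactly one of index/handle may be used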
3185
// Init the pipeline mapping info based on pipeline create info LL tree
//  Threading note : Calls to this function should be wrapped in a mutex
3188// TODO : this should really just be in the constructor for PIPELINE_NODE
3189static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
3190    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
3191
3192    // First init create info
3193    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
3194
3195    size_t bufferSize = 0;
3196    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
3197    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
3198
3199    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
3200        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
3201
3202        switch (pPSSCI->stage) {
3203        case VK_SHADER_STAGE_VERTEX_BIT:
3204            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3205            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
3206            break;
3207        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
3208            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3209            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3210            break;
3211        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
3212            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3213            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
3214            break;
3215        case VK_SHADER_STAGE_GEOMETRY_BIT:
3216            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3217            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
3218            break;
3219        case VK_SHADER_STAGE_FRAGMENT_BIT:
3220            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3221            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
3222            break;
3223        case VK_SHADER_STAGE_COMPUTE_BIT:
3224            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
3225            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
3226            break;
3227        default:
3228            // TODO : Flag error
3229            break;
3230        }
3231    }
3232    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
3233    if (pCreateInfo->stageCount != 0) {
3234        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
3235        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
3236        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
3237    }
3238    if (pCreateInfo->pVertexInputState != NULL) {
3239        memcpy((void *)&pPipeline->vertexInputCI, pCreateInfo->pVertexInputState, sizeof(VkPipelineVertexInputStateCreateInfo));
3240        // Copy embedded ptrs
3241        pVICI = pCreateInfo->pVertexInputState;
3242        pPipeline->vtxBindingCount = pVICI->vertexBindingDescriptionCount;
3243        if (pPipeline->vtxBindingCount) {
3244            pPipeline->pVertexBindingDescriptions = new VkVertexInputBindingDescription[pPipeline->vtxBindingCount];
3245            bufferSize = pPipeline->vtxBindingCount * sizeof(VkVertexInputBindingDescription);
3246            memcpy((void *)pPipeline->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions, bufferSize);
3247        }
3248        pPipeline->vtxAttributeCount = pVICI->vertexAttributeDescriptionCount;
3249        if (pPipeline->vtxAttributeCount) {
3250            pPipeline->pVertexAttributeDescriptions = new VkVertexInputAttributeDescription[pPipeline->vtxAttributeCount];
3251            bufferSize = pPipeline->vtxAttributeCount * sizeof(VkVertexInputAttributeDescription);
3252            memcpy((void *)pPipeline->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions, bufferSize);
3253        }
3254        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
3255    }
3256    if (pCreateInfo->pInputAssemblyState != NULL) {
3257        memcpy((void *)&pPipeline->iaStateCI, pCreateInfo->pInputAssemblyState, sizeof(VkPipelineInputAssemblyStateCreateInfo));
3258        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
3259    }
3260    if (pCreateInfo->pTessellationState != NULL) {
3261        memcpy((void *)&pPipeline->tessStateCI, pCreateInfo->pTessellationState, sizeof(VkPipelineTessellationStateCreateInfo));
3262        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
3263    }
3264    if (pCreateInfo->pViewportState != NULL) {
3265        memcpy((void *)&pPipeline->vpStateCI, pCreateInfo->pViewportState, sizeof(VkPipelineViewportStateCreateInfo));
3266        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
3267    }
3268    if (pCreateInfo->pRasterizationState != NULL) {
3269        memcpy((void *)&pPipeline->rsStateCI, pCreateInfo->pRasterizationState, sizeof(VkPipelineRasterizationStateCreateInfo));
3270        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
3271    }
3272    if (pCreateInfo->pMultisampleState != NULL) {
3273        memcpy((void *)&pPipeline->msStateCI, pCreateInfo->pMultisampleState, sizeof(VkPipelineMultisampleStateCreateInfo));
3274        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
3275    }
3276    if (pCreateInfo->pDepthStencilState != NULL) {
3277        memcpy((void *)&pPipeline->dsStateCI, pCreateInfo->pDepthStencilState, sizeof(VkPipelineDepthStencilStateCreateInfo));
3278        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
3279    }
3280    if (pCreateInfo->pColorBlendState != NULL) {
3281        memcpy((void *)&pPipeline->cbStateCI, pCreateInfo->pColorBlendState, sizeof(VkPipelineColorBlendStateCreateInfo));
3282        // Copy embedded ptrs
3283        pCBCI = pCreateInfo->pColorBlendState;
3284        pPipeline->attachmentCount = pCBCI->attachmentCount;
3285        if (pPipeline->attachmentCount) {
3286            pPipeline->pAttachments = new VkPipelineColorBlendAttachmentState[pPipeline->attachmentCount];
3287            bufferSize = pPipeline->attachmentCount * sizeof(VkPipelineColorBlendAttachmentState);
3288            memcpy((void *)pPipeline->pAttachments, pCBCI->pAttachments, bufferSize);
3289        }
3290        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
3291    }
3292    if (pCreateInfo->pDynamicState != NULL) {
3293        memcpy((void *)&pPipeline->dynStateCI, pCreateInfo->pDynamicState, sizeof(VkPipelineDynamicStateCreateInfo));
3294        if (pPipeline->dynStateCI.dynamicStateCount) {
3295            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
3296            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
3297            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
3298        }
3299        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
3300    }
3301    pPipeline->active_sets.clear();
3302    return pPipeline;
3303}
3304
3305// Free the Pipeline nodes
3306static void deletePipelines(layer_data *my_data) {
3307    if (my_data->pipelineMap.empty())
3308        return;
3309    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
3310        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
3311            delete[](*ii).second->graphicsPipelineCI.pStages;
3312        }
3313        delete[](*ii).second->pVertexBindingDescriptions;
3314        delete[](*ii).second->pVertexAttributeDescriptions;
3315        delete[](*ii).second->pAttachments;
3316        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
3317            delete[](*ii).second->dynStateCI.pDynamicStates;
3318        }
3319        delete (*ii).second;
3320    }
3321    my_data->pipelineMap.clear();
3322}
3323
3324// For given pipeline, return number of MSAA samples, or one if MSAA disabled
3325static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
3326    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
3327    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
3328        return pPipe->msStateCI.rasterizationSamples;
3329    }
3330    return VK_SAMPLE_COUNT_1_BIT;
3331}
3332
3333// Validate state related to the PSO
3334static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
3335                                      const VkPipeline pipeline) {
3336    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
3337        // Verify that any MSAA request in PSO matches sample# in bound FB
3338        // Skip the check if rasterization is disabled.
3339        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3340        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3341            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3342            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3343            if (pCB->activeRenderPass) {
3344                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3345                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3346                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3347                uint32_t i;
3348
3349                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3350                    VkSampleCountFlagBits samples;
3351
3352                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3353                        continue;
3354
3355                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3356                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3357                        subpassNumSamples = samples;
3358                    } else if (subpassNumSamples != samples) {
3359                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3360                        break;
3361                    }
3362                }
3363                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3364                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3365                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3366                        subpassNumSamples = samples;
3367                    else if (subpassNumSamples != samples)
3368                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3369                }
3370
3371                if (psoNumSamples != subpassNumSamples) {
3372                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3373                                   (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3374                                   "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
3375                                   ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
3376                                   (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3377                }
3378            } else {
3379                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3380                //   Verify and flag error as appropriate
3381            }
3382        }
3383        // TODO : Add more checks here
3384    } else {
3385        // TODO : Validate non-gfx pipeline updates
3386    }
3387    return VK_FALSE;
3388}
3389
3390// Block of code at start here specifically for managing/tracking DSs
3391
3392// Return Pool node ptr for specified pool or else NULL
3393static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3394    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3395        return NULL;
3396    }
3397    return my_data->descriptorPoolMap[pool];
3398}
3399
3400static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
3401    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
3402        return NULL;
3403    }
3404    return my_data->descriptorSetLayoutMap[layout];
3405}
3406
3407// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
3408static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3409    switch (pUpdateStruct->sType) {
3410    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3411    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3412        return VK_FALSE;
3413    default:
3414        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3415                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3416                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
3417                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3418    }
3419}
3420
3421 // Return the descriptor count for the given update struct
3422 // Returns 0 (no descriptors updated) if the update struct type is unrecognized
3423static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3424    switch (pUpdateStruct->sType) {
3425    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3426        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3427    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3428        // TODO : Need to understand this case better and make sure code is correct
3429        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3430    default:
3431        return 0;
3432    }
3434}
3435
3436// For given Layout Node and binding, return index where that binding begins
3437static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
3438    uint32_t offsetIndex = 0;
3439    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
3440        if (pLayout->createInfo.pBindings[i].binding == binding)
3441            break;
3442        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
3443    }
3444    return offsetIndex;
3445}
3446
3447// For given layout node and binding, return last index that is updated
3448static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
3449    uint32_t offsetIndex = 0;
3450    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
3451        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
3452        if (pLayout->createInfo.pBindings[i].binding == binding)
3453            break;
3454    }
3455    return offsetIndex - 1;
3456}
3457
3458// For given layout and update, return the first overall index of the layout that is updated
3459static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3460                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3461    return getBindingStartIndex(pLayout, binding) + arrayIndex;
3462}
3463
3464// For given layout and update, return the last overall index of the layout that is updated
3465static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3466                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3467    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3468    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
3469}
3470
3471// Verify that the descriptor type in the update struct matches what's expected by the layout
3472static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
3473                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3474    // First get actual type of update
3475    VkBool32 skipCall = VK_FALSE;
3476    VkDescriptorType actualType;
3477    uint32_t i = 0;
3478    switch (pUpdateStruct->sType) {
3479    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3480        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3481        break;
3482    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3483        /* no need to validate */
3484        return VK_FALSE;
3486    default:
3487        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3488                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3489                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
3490                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3491    }
3492    if (VK_FALSE == skipCall) {
3493        // Set first stageFlags as reference and verify that all other updates match it
3494        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
3495        for (i = startIndex; i <= endIndex; i++) {
3496            if (pLayout->descriptorTypes[i] != actualType) {
3497                skipCall |= log_msg(
3498                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3499                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3500                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3501                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
3502            }
3503            if (pLayout->stageFlags[i] != refStageFlags) {
3504                skipCall |= log_msg(
3505                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3506                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
3507                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
3508                    refStageFlags, pLayout->stageFlags[i]);
3509            }
3510        }
3511    }
3512    return skipCall;
3513}
3514
3515// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3516//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
3517// NOTE : Calls to this function should be wrapped in mutex
3518static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3519    VkBool32 skipCall = VK_FALSE;
3520    VkWriteDescriptorSet *pWDS = NULL;
3521    VkCopyDescriptorSet *pCDS = NULL;
3522    switch (pUpdate->sType) {
3523    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3524        pWDS = new VkWriteDescriptorSet;
3525        *pNewNode = (GENERIC_HEADER *)pWDS;
3526        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3527
3528        switch (pWDS->descriptorType) {
3529        case VK_DESCRIPTOR_TYPE_SAMPLER:
3530        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3531        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3532        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3533            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3534            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3535            pWDS->pImageInfo = info;
3536        } break;
3537        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3538        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3539            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3540            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3541            pWDS->pTexelBufferView = info;
3542        } break;
3543        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3544        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3545        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3546        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3547            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3548            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3549            pWDS->pBufferInfo = info;
3550        } break;
3551        default:
            // Unexpected descriptorType: flag the error/early-exit as VkBool32 rather than returning a VkResult code
3552            return VK_TRUE;
3554        }
3555        break;
3556    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3557        pCDS = new VkCopyDescriptorSet;
3558        *pNewNode = (GENERIC_HEADER *)pCDS;
3559        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3560        break;
3561    default:
3562        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3563                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3564                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
3565                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3566            return VK_TRUE;
3567    }
    // Unrecognized struct types allocate no shadow node; guard before dereferencing *pNewNode
    if (NULL == *pNewNode)
        return skipCall;
3568    // Make sure that pNext for the end of shadow copy is NULL
3569    (*pNewNode)->pNext = NULL;
3570    return skipCall;
3571}
3572
3573// Verify that given sampler is valid
3574static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
3575    VkBool32 skipCall = VK_FALSE;
3576    auto sampIt = my_data->sampleMap.find(*pSampler);
3577    if (sampIt == my_data->sampleMap.end()) {
3578        if (!immutable) {
3579            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3580                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3581                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3582                                (uint64_t)*pSampler);
3583        } else { // immutable
3584            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3585                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3586                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3587                                "sampler %#" PRIxLEAST64,
3588                                (uint64_t)*pSampler);
3589        }
3590    } else {
3591        // TODO : Any further checks we want to do on the sampler?
3592    }
3593    return skipCall;
3594}
3595
3596// find layout(s) on the cmd buf level
3597bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3598    ImageSubresourcePair imgpair = {image, true, range};
3599    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3600    if (imgsubIt == pCB->imageLayoutMap.end()) {
3601        imgpair = {image, false, VkImageSubresource()};
3602        imgsubIt = pCB->imageLayoutMap.find(imgpair);
3603        if (imgsubIt == pCB->imageLayoutMap.end())
3604            return false;
3605    }
3606    node = imgsubIt->second;
3607    return true;
3608}
3609
3610// find layout(s) on the global level
3611bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3612    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3613    if (imgsubIt == my_data->imageLayoutMap.end()) {
3614        imgpair = {imgpair.image, false, VkImageSubresource()};
3615        imgsubIt = my_data->imageLayoutMap.find(imgpair);
3616        if (imgsubIt == my_data->imageLayoutMap.end())
3617            return false;
3618    }
3619    layout = imgsubIt->second.layout;
3620    return true;
3621}
3622
3623bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3624    ImageSubresourcePair imgpair = {image, true, range};
3625    return FindLayout(my_data, imgpair, layout);
3626}
3627
3628bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3629    auto sub_data = my_data->imageSubresourceMap.find(image);
3630    if (sub_data == my_data->imageSubresourceMap.end())
3631        return false;
3632    auto imgIt = my_data->imageMap.find(image);
3633    if (imgIt == my_data->imageMap.end())
3634        return false;
3635    bool ignoreGlobal = false;
3636    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3637    // potential errors in this case.
3638    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3639        ignoreGlobal = true;
3640    }
3641    for (auto imgsubpair : sub_data->second) {
3642        if (ignoreGlobal && !imgsubpair.hasSubresource)
3643            continue;
3644        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3645        if (img_data != my_data->imageLayoutMap.end()) {
3646            layouts.push_back(img_data->second.layout);
3647        }
3648    }
3649    return true;
3650}
3651
3652// Set the layout on the global level
3653void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3654    VkImage &image = imgpair.image;
3655    // TODO (mlentine): Maybe set format if new? Not used atm.
3656    my_data->imageLayoutMap[imgpair].layout = layout;
3657    // TODO (mlentine): Maybe make vector a set?
3658    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3659    if (subresource == my_data->imageSubresourceMap[image].end()) {
3660        my_data->imageSubresourceMap[image].push_back(imgpair);
3661    }
3662}
3663
3664void SetLayout(layer_data *my_data, VkImage image, const VkImageLayout &layout) {
3665    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3666    SetLayout(my_data, imgpair, layout);
3667}
3668
3669void SetLayout(layer_data *my_data, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
3670    ImageSubresourcePair imgpair = {image, true, range};
3671    SetLayout(my_data, imgpair, layout);
3672}
3673
3674// Set the layout on the cmdbuf level
3675void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3676    pCB->imageLayoutMap[imgpair] = node;
3677    // TODO (mlentine): Maybe make vector a set?
3678    auto subresource = std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair);
3679    if (subresource == pCB->imageSubresourceMap[image].end()) {
3680        pCB->imageSubresourceMap[image].push_back(imgpair);
3681    }
3682}
3683
3684void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3685    // TODO (mlentine): Maybe make vector a set?
3686    if (std::find(pCB->imageSubresourceMap[image].begin(), pCB->imageSubresourceMap[image].end(), imgpair) !=
3687        pCB->imageSubresourceMap[image].end()) {
3688        pCB->imageLayoutMap[imgpair].layout = layout;
3689    } else {
3690        // TODO (mlentine): Could be expensive and might need to be removed.
3691        assert(imgpair.hasSubresource);
3692        IMAGE_CMD_BUF_LAYOUT_NODE node;
3693        FindLayout(pCB, image, imgpair.subresource, node);
3694        SetLayout(pCB, image, imgpair, {node.initialLayout, layout});
3695    }
3696}
3697
3698void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3699    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3700    SetLayout(pCB, image, imgpair, node);
3701}
3702
3703void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3704    ImageSubresourcePair imgpair = {image, true, range};
3705    SetLayout(pCB, image, imgpair, node);
3706}
3707
3708void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, const VkImageLayout &layout) {
3709    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3710    SetLayout(pCB, image, imgpair, layout);
3711}
3712
3713void SetLayout(GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, const VkImageLayout &layout) {
3714    ImageSubresourcePair imgpair = {image, true, range};
3715    SetLayout(pCB, image, imgpair, layout);
3716}
3717
3718void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3719    auto image_view_data = dev_data->imageViewMap.find(imageView);
3720    assert(image_view_data != dev_data->imageViewMap.end());
3721    const VkImage &image = image_view_data->second.image;
3722    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3723    // TODO: Do not iterate over every possibility - consolidate where possible
3724    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3725        uint32_t level = subRange.baseMipLevel + j;
3726        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3727            uint32_t layer = subRange.baseArrayLayer + k;
3728            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3729            SetLayout(pCB, image, sub, layout);
3730        }
3731    }
3732}
3733
3734// Verify that given imageView is valid
3735static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3736    VkBool32 skipCall = VK_FALSE;
3737    auto ivIt = my_data->imageViewMap.find(*pImageView);
3738    if (ivIt == my_data->imageViewMap.end()) {
3739        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3740                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3741                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3742                            (uint64_t)*pImageView);
3743    } else {
3744        // Validate that imageLayout is compatible with aspectMask and image format
3745        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3746        VkImage image = ivIt->second.image;
3747        // TODO : Check here in case we have a bad image
3748        VkFormat format = VK_FORMAT_MAX_ENUM;
3749        auto imgIt = my_data->imageMap.find(image);
3750        if (imgIt != my_data->imageMap.end()) {
3751            format = (*imgIt).second.createInfo.format;
3752        } else {
3753            // Also need to check the swapchains.
3754            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3755            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3756                VkSwapchainKHR swapchain = swapchainIt->second;
3757                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3758                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3759                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3760                    format = pswapchain_node->createInfo.imageFormat;
3761                }
3762            }
3763        }
3764        if (format == VK_FORMAT_MAX_ENUM) {
3765            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3766                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3767                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3768                                " in imageView %#" PRIxLEAST64,
3769                                (uint64_t)image, (uint64_t)*pImageView);
3770        } else {
3771            VkBool32 ds = vk_format_is_depth_or_stencil(format);
3772            switch (imageLayout) {
3773            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3774                // Only Color bit must be set
3775                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3776                    skipCall |=
3777                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3778                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3779                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3780                                "and imageView %#" PRIxLEAST64 ""
3781                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3782                                (uint64_t)*pImageView);
3783                }
3784                // format must NOT be DS
3785                if (ds) {
3786                    skipCall |=
3787                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3788                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3789                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3790                                "and imageView %#" PRIxLEAST64 ""
3791                                " but the image format is %s which is not a color format.",
3792                                (uint64_t)*pImageView, string_VkFormat(format));
3793                }
3794                break;
3795            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3796            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3797                // Depth or stencil bit must be set, but both must NOT be set
3798                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3799                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3800                        // both must NOT be set
3801                        skipCall |=
3802                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3803                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3804                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3805                                    " that has both STENCIL and DEPTH aspects set",
3806                                    (uint64_t)*pImageView);
3807                    }
3808                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3809                    // Neither were set
3810                    skipCall |=
3811                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3812                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3813                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3814                                " that does not have STENCIL or DEPTH aspect set.",
3815                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3816                }
3817                // format must be DS
3818                if (!ds) {
3819                    skipCall |=
3820                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3821                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3822                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3823                                " but the image format is %s which is not a depth/stencil format.",
3824                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3825                }
3826                break;
3827            default:
3828                // anything to check for other layouts?
3829                break;
3830            }
3831        }
3832    }
3833    return skipCall;
3834}
3835
3836// Verify that given bufferView is valid
3837static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3838    VkBool32 skipCall = VK_FALSE;
3839    auto bufferViewIt = my_data->bufferViewMap.find(*pBufferView);
3840    if (bufferViewIt == my_data->bufferViewMap.end()) {
3841        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3842                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3843                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3844                            (uint64_t)*pBufferView);
3845    } else {
3846        // TODO : Any further checks we want to do on the bufferView?
3847    }
3848    return skipCall;
3849}
3850
3851// Verify that given bufferInfo is valid
3852static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3853    VkBool32 skipCall = VK_FALSE;
3854    auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
3855    if (bufferIt == my_data->bufferMap.end()) {
3856        skipCall |=
3857            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3858                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3859                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3860                    (uint64_t)pBufferInfo->buffer);
3861    } else {
3862        // TODO : Any further checks we want to do on the buffer?
3863    }
3864    return skipCall;
3865}
3866
3867static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3868                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
3869    VkBool32 skipCall = VK_FALSE;
3870    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3871    const VkSampler *pSampler = NULL;
3872    VkBool32 immutable = VK_FALSE;
3873    uint32_t i = 0;
3874    // For given update type, verify that update contents are correct
3875    switch (pWDS->descriptorType) {
3876    case VK_DESCRIPTOR_TYPE_SAMPLER:
3877        for (i = 0; i < pWDS->descriptorCount; ++i) {
3878            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3879        }
3880        break;
3881    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3882        for (i = 0; i < pWDS->descriptorCount; ++i) {
3883            if (NULL == pLayoutBinding->pImmutableSamplers) {
3884                pSampler = &(pWDS->pImageInfo[i].sampler);
3885                if (immutable) {
3886                    skipCall |= log_msg(
3887                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3888                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3889                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3890                        ", but previous update(s) from this "
3891                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3892                        "use immutable or non-immutable samplers.",
3893                        i, (uint64_t)*pSampler);
3894                }
3895            } else {
3896                if (i > 0 && !immutable) {
3897                    skipCall |= log_msg(
3898                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3899                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3900                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3901                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3902                        "use immutable or non-immutable samplers.",
3903                        i);
3904                }
3905                immutable = VK_TRUE;
3906                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
3907            }
3908            skipCall |= validateSampler(my_data, pSampler, immutable);
3909        }
3910    // Intentionally fall through here to also validate image stuff
3911    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3912    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3913    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3914        for (i = 0; i < pWDS->descriptorCount; ++i) {
3915            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3916        }
3917        break;
3918    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3919    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3920        for (i = 0; i < pWDS->descriptorCount; ++i) {
3921            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3922        }
3923        break;
3924    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3925    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3926    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3927    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3928        for (i = 0; i < pWDS->descriptorCount; ++i) {
3929            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3930        }
3931        break;
3932    default:
3933        break;
3934    }
3935    return skipCall;
3936}
3937 // Verify that the given set exists and is not in use by an in-flight CmdBuffer
3938// func_str is the name of the calling function
3939// Return VK_FALSE if no errors occur
3940// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3941VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3942    VkBool32 skip_call = VK_FALSE;
3943    auto set_node = my_data->setMap.find(set);
3944    if (set_node == my_data->setMap.end()) {
3945        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3946                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3947                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3948                             (uint64_t)(set));
3949    } else {
3950        if (set_node->second->in_use.load()) {
3951            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3952                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3953                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3954                                 func_str.c_str(), (uint64_t)(set));
3955        }
3956    }
3957    return skip_call;
3958}
3959static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3960    // Flag any CBs this set is bound to as INVALID
3961    for (auto cb : pSet->boundCmdBuffers) {
3962        auto cb_node = dev_data->commandBufferMap.find(cb);
3963        if (cb_node != dev_data->commandBufferMap.end()) {
3964            cb_node->second->state = CB_INVALID;
3965        }
3966    }
3967}
3968// update DS mappings based on write and copy update arrays
3969static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3970                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3971    VkBool32 skipCall = VK_FALSE;
3972
3973    LAYOUT_NODE *pLayout = NULL;
3974    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
3975    // Validate Write updates
3976    uint32_t i = 0;
3977    for (i = 0; i < descriptorWriteCount; i++) {
3978        VkDescriptorSet ds = pWDS[i].dstSet;
3979        SET_NODE *pSet = my_data->setMap[ds];
3980        // Set being updated cannot be in-flight
3981        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == VK_TRUE)
3982            return skipCall;
3983        // If set is bound to any cmdBuffers, mark them invalid
3984        invalidateBoundCmdBuffers(my_data, pSet);
3985        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3986        pLayout = pSet->pLayout;
3987        // First verify valid update struct
3988        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
3989            break;
3990        }
3991        uint32_t binding = 0, endIndex = 0;
3992        binding = pWDS[i].dstBinding;
3993        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
3994        // Make sure that layout being updated has the binding being updated
3995        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
3996            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3997                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3998                                "Descriptor Set %" PRIu64 " does not have binding to match "
3999                                "update binding %u for update type "
4000                                "%s!",
4001                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
4002        } else {
4003            // Next verify that update falls within size of given binding
4004            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4005            if (getBindingEndIndex(pLayout, binding) < endIndex) {
4006                pLayoutCI = &pLayout->createInfo;
4007                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4008                skipCall |=
4009                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4010                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4011                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4012                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
4013            } else { // TODO : should we skip update on a type mismatch or force it?
4014                uint32_t startIndex;
4015                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4016                // Layout bindings match w/ update, now verify that update type
4017                // & stageFlags are the same for entire update
4018                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
4019                    // The update is within bounds and consistent, but need to
4020                    // make sure contents make sense as well
4021                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
4022                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
4023                        // Update is good. Save the update info
4024                        // Create new update struct for this set's shadow copy
4025                        GENERIC_HEADER *pNewNode = NULL;
4026                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
4027                        if (NULL == pNewNode) {
4028                            skipCall |= log_msg(
4029                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4030                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4031                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptorSets()");
4032                        } else {
4033                            // Insert shadow node into LL of updates for this set
4034                            pNewNode->pNext = pSet->pUpdateStructs;
4035                            pSet->pUpdateStructs = pNewNode;
4036                            // Now update appropriate descriptor(s) to point to new Update node
4037                            for (uint32_t j = startIndex; j <= endIndex; j++) {
4038                                assert(j < pSet->descriptorCount);
4039                                pSet->ppDescriptors[j] = pNewNode;
4040                            }
4041                        }
4042                    }
4043                }
4044            }
4045        }
4046    }
4047    // Now validate copy updates
4048    for (i = 0; i < descriptorCopyCount; ++i) {
4049        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4050        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4051        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4052        // For each copy make sure that update falls within given layout and that types match
4053        pSrcSet = my_data->setMap[pCDS[i].srcSet];
4054        pDstSet = my_data->setMap[pCDS[i].dstSet];
4055        // Set being updated cannot be in-flight
4056        if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == VK_TRUE)
4057            return skipCall;
4058        invalidateBoundCmdBuffers(my_data, pDstSet);
4059        pSrcLayout = pSrcSet->pLayout;
4060        pDstLayout = pDstSet->pLayout;
4061        // Validate that src binding is valid for src set layout
4062        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4063            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4064                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4065                                "Copy descriptor update %u has srcBinding %u "
4066                                "which is out of bounds for underlying SetLayout "
4067                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4068                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
4069        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
4070            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4071                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4072                                "Copy descriptor update %u has dstBinding %u "
4073                                "which is out of bounds for underlying SetLayout "
4074                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4075                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
4076        } else {
4077            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
4078            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4079                                            (const GENERIC_HEADER *)&(pCDS[i]));
4080            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4081                                            (const GENERIC_HEADER *)&(pCDS[i]));
4082            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
4083                pLayoutCI = &pSrcLayout->createInfo;
4084                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4085                skipCall |=
4086                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4087                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4088                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4089                            pCDS[i].srcBinding, DSstr.c_str());
4090            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
4091                pLayoutCI = &pDstLayout->createInfo;
4092                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4093                skipCall |=
4094                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4095                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4096                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4097                            pCDS[i].dstBinding, DSstr.c_str());
4098            } else {
4099                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4100                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4101                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4102                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4103                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
4104                    // For copy just make sure that the types match and then perform the update
4105                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
4106                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4107                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
4108                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
4109                                            "that does not match overlapping dest descriptor type of %s!",
4110                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
4111                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
4112                    } else {
4113                        // point dst descriptor at corresponding src descriptor
4114                        // TODO : This may be a hole. I believe copy should be its own copy,
4115                        //  otherwise a subsequent write update to src will incorrectly affect the copy
4116                        pDstSet->ppDescriptors[j + dstStartIndex] = pSrcSet->ppDescriptors[j + srcStartIndex];
4117                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
4118                    }
4119                }
4120            }
4121        }
4122    }
4123    return skipCall;
4124}
4125
4126// Verify that given pool has descriptors that are being requested for allocation
4127static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
4128                                                         const VkDescriptorSetLayout *pSetLayouts) {
4129    VkBool32 skipCall = VK_FALSE;
4130    uint32_t i = 0, j = 0;
4131    for (i = 0; i < count; ++i) {
4132        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4133        if (NULL == pLayout) {
4134            skipCall |=
4135                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4136                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4137                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4138                        (uint64_t)pSetLayouts[i]);
4139        } else {
4140            uint32_t typeIndex = 0, poolSizeCount = 0;
4141            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4142                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4143                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4144                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4145                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4146                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4147                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4148                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
4149                                        ". This pool only has %u descriptors of this type remaining.",
4150                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4151                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4152                } else { // Decrement available descriptors of this type
4153                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4154                }
4155            }
4156        }
4157    }
4158    return skipCall;
4159}
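
// Worked example (illustrative): for a pool created with
// VkDescriptorPoolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 4}, a set layout whose binding
// requests three uniform-buffer descriptors passes the check above on the first
// allocation (4 available -> 1 remaining); a second allocation of the same layout asks
// for 3 with only 1 remaining and triggers DRAWSTATE_DESCRIPTOR_POOL_EMPTY.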
4160
4161// Free the shadowed update node for this Set
4162// NOTE : Calls to this function should be wrapped in mutex
4163static void freeShadowUpdateTree(SET_NODE *pSet) {
4164    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4165    pSet->pUpdateStructs = NULL;
4166    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4167    // Clear the descriptor mappings as they will now be invalid
4168    memset(pSet->ppDescriptors, 0, pSet->descriptorCount * sizeof(GENERIC_HEADER *));
4169    while (pShadowUpdate) {
4170        pFreeUpdate = pShadowUpdate;
4171        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4172        VkWriteDescriptorSet *pWDS = NULL;
4173        switch (pFreeUpdate->sType) {
4174        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4175            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4176            switch (pWDS->descriptorType) {
4177            case VK_DESCRIPTOR_TYPE_SAMPLER:
4178            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4179            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4180            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4181                delete[] pWDS->pImageInfo;
4182            } break;
4183            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4184            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4185                delete[] pWDS->pTexelBufferView;
4186            } break;
4187            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4188            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4189            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4190            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4191                delete[] pWDS->pBufferInfo;
4192            } break;
4193            default:
4194                break;
4195            }
4196            break;
4197        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4198            break;
4199        default:
4200            assert(0);
4201            break;
4202        }
4203        delete pFreeUpdate;
4204    }
4205}
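
// Design note (inferred from the deletes above): each shadow node owns deep copies of its
// pImageInfo / pBufferInfo / pTexelBufferView arrays, so freeing the chain must free those
// arrays as well; VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET nodes carry no owned arrays.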
4206
4207// Free all DS Pools including their Sets & related sub-structs
4208// NOTE : Calls to this function should be wrapped in mutex
4209static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
4212    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4213        SET_NODE *pSet = (*ii).second->pSets;
4214        SET_NODE *pFreeSet = pSet;
4215        while (pSet) {
4216            pFreeSet = pSet;
4217            pSet = pSet->pNext;
4218            // Freeing layouts handled in deleteLayouts() function
4219            // Free Update shadow struct tree
4220            freeShadowUpdateTree(pFreeSet);
4221            delete[] pFreeSet->ppDescriptors;
4222            delete pFreeSet;
4223        }
4224        delete (*ii).second;
4225    }
4226    my_data->descriptorPoolMap.clear();
4227}
4228
4229// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
4230// NOTE : Calls to this function should be wrapped in mutex
4231static void deleteLayouts(layer_data *my_data) {
    if (my_data->descriptorSetLayoutMap.empty())
        return;
4234    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4235        LAYOUT_NODE *pLayout = (*ii).second;
4236        if (pLayout->createInfo.pBindings) {
4237            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4238                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4239            }
4240            delete[] pLayout->createInfo.pBindings;
4241        }
4242        delete pLayout;
4243    }
4244    my_data->descriptorSetLayoutMap.clear();
4245}
4246
// Clearing a set currently means removing all previous updates to that set
//  TODO : Validate whether this is the correct clearing behavior
4249static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4250    SET_NODE *pSet = getSetNode(my_data, set);
4251    if (!pSet) {
4252        // TODO : Return error
4253    } else {
4254        freeShadowUpdateTree(pSet);
4255    }
4256}
4257
4258static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4259                                VkDescriptorPoolResetFlags flags) {
4260    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4261    if (!pPool) {
4262        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4263                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4264                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4265    } else {
4266        // TODO: validate flags
        // For every set allocated from this pool, clear it
4268        SET_NODE *pSet = pPool->pSets;
4269        while (pSet) {
4270            clearDescriptorSet(my_data, pSet->set);
4271            pSet = pSet->pNext;
4272        }
4273        // Reset available count to max count for this pool
4274        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4275            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4276        }
4277    }
4278}
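
// Sketch (typical app flow, not part of the layer): after
//
//     vkResetDescriptorPool(device, pool, 0); // no reset flags are defined in the core spec
//
// every set previously allocated from 'pool' is implicitly freed, and the loop above
// restores availableDescriptorTypeCount[] to maxDescriptorTypeCount[] so the app can
// allocate the pool's full capacity again.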
4279
4280// For given CB object, fetch associated CB Node from map
4281static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4282    if (my_data->commandBufferMap.count(cb) == 0) {
4283        // TODO : How to pass cb as srcObj here?
4284        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__,
4285                DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!",
4286                (uint64_t)(cb));
4287        return NULL;
4288    }
4289    return my_data->commandBufferMap[cb];
4290}
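
// Note: callers must NULL-check the result of getCBNode(); when it returns NULL the
// invalid-handle error has already been logged above.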
4291
4292// Free all CB Nodes
4293// NOTE : Calls to this function should be wrapped in mutex
4294static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
4298    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4299        delete (*ii).second;
4300    }
4301    my_data->commandBufferMap.clear();
4302}
4303
4304static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4305    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4306                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4307                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4308}
4309
4310VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4311    if (!pCB->activeRenderPass)
4312        return VK_FALSE;
4313    VkBool32 skip_call = VK_FALSE;
4314    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4315        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4316                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4317                             "Commands cannot be called in a subpass using secondary command buffers.");
4318    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4319        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4320                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4321                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4322    }
4323    return skip_call;
4324}
4325
4326static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4327    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4328        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4329                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4330                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4331    return false;
4332}
4333
4334static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4335    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4336        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4337                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4338                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4339    return false;
4340}
4341
4342static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4343    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4344        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4345                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
4347    return false;
4348}
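
// Example (illustrative): a transfer-only queue family reports flags == VK_QUEUE_TRANSFER_BIT,
// so neither VK_QUEUE_GRAPHICS_BIT nor VK_QUEUE_COMPUTE_BIT is set and each of the three
// checks above fails for command buffers allocated from a pool on that family.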
4349
4350// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4351//  in the recording state or if there's an issue with the Cmd ordering
4352static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4353    VkBool32 skipCall = VK_FALSE;
4354    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4355    if (pool_data != my_data->commandPoolMap.end()) {
4356        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4357        switch (cmd) {
4358        case CMD_BINDPIPELINE:
4359        case CMD_BINDPIPELINEDELTA:
4360        case CMD_BINDDESCRIPTORSETS:
4361        case CMD_FILLBUFFER:
4362        case CMD_CLEARCOLORIMAGE:
4363        case CMD_SETEVENT:
4364        case CMD_RESETEVENT:
4365        case CMD_WAITEVENTS:
4366        case CMD_BEGINQUERY:
4367        case CMD_ENDQUERY:
4368        case CMD_RESETQUERYPOOL:
4369        case CMD_COPYQUERYPOOLRESULTS:
4370        case CMD_WRITETIMESTAMP:
4371            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4372            break;
4373        case CMD_SETVIEWPORTSTATE:
4374        case CMD_SETSCISSORSTATE:
4375        case CMD_SETLINEWIDTHSTATE:
4376        case CMD_SETDEPTHBIASSTATE:
4377        case CMD_SETBLENDSTATE:
4378        case CMD_SETDEPTHBOUNDSSTATE:
4379        case CMD_SETSTENCILREADMASKSTATE:
4380        case CMD_SETSTENCILWRITEMASKSTATE:
4381        case CMD_SETSTENCILREFERENCESTATE:
4382        case CMD_BINDINDEXBUFFER:
4383        case CMD_BINDVERTEXBUFFER:
4384        case CMD_DRAW:
4385        case CMD_DRAWINDEXED:
4386        case CMD_DRAWINDIRECT:
4387        case CMD_DRAWINDEXEDINDIRECT:
4388        case CMD_BLITIMAGE:
4389        case CMD_CLEARATTACHMENTS:
4390        case CMD_CLEARDEPTHSTENCILIMAGE:
4391        case CMD_RESOLVEIMAGE:
4392        case CMD_BEGINRENDERPASS:
4393        case CMD_NEXTSUBPASS:
4394        case CMD_ENDRENDERPASS:
4395            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4396            break;
4397        case CMD_DISPATCH:
4398        case CMD_DISPATCHINDIRECT:
4399            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4400            break;
4401        case CMD_COPYBUFFER:
4402        case CMD_COPYIMAGE:
4403        case CMD_COPYBUFFERTOIMAGE:
4404        case CMD_COPYIMAGETOBUFFER:
4405        case CMD_CLONEIMAGEDATA:
4406        case CMD_UPDATEBUFFER:
4407        case CMD_PIPELINEBARRIER:
4408        case CMD_EXECUTECOMMANDS:
4409            break;
4410        default:
4411            break;
4412        }
4413    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        // Init cmd node and append it to the end of the CB's command list
        CMD_NODE cmdNode = {};
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
4423    return skipCall;
4424}
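
// Sketch (hypothetical call site): each vkCmd* entry point in this layer is expected to
// funnel through addCmd() so the queue-capability and recording-state checks above run
// once per recorded command, e.g.:
//
//     skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
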
4425// Reset the command buffer state
4426//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4427static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
4428    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
4429    if (pCB) {
4430        pCB->cmds.clear();
4431        // Reset CB state (note that createInfo is not cleared)
4432        pCB->commandBuffer = cb;
4433        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4434        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4435        pCB->fence = 0;
4436        pCB->numCmds = 0;
4437        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4438        pCB->state = CB_NEW;
4439        pCB->submitCount = 0;
4440        pCB->status = 0;
4441        pCB->lastBoundPipeline = 0;
4442        pCB->lastVtxBinding = 0;
4443        pCB->boundVtxBuffers.clear();
4444        pCB->viewports.clear();
4445        pCB->scissors.clear();
4446        pCB->lineWidth = 0;
4447        pCB->depthBiasConstantFactor = 0;
4448        pCB->depthBiasClamp = 0;
4449        pCB->depthBiasSlopeFactor = 0;
4450        memset(pCB->blendConstants, 0, 4 * sizeof(float));
4451        pCB->minDepthBounds = 0;
4452        pCB->maxDepthBounds = 0;
4453        memset(&pCB->front, 0, sizeof(stencil_data));
4454        memset(&pCB->back, 0, sizeof(stencil_data));
4455        pCB->lastBoundDescriptorSet = 0;
4456        pCB->lastBoundPipelineLayout = 0;
4457        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4458        pCB->activeRenderPass = 0;
4459        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4460        pCB->activeSubpass = 0;
4461        pCB->framebuffer = 0;
        // Before clearing uniqueBoundSets, remove this CB from each bound set's boundCmdBuffers list
4463        for (auto set : pCB->uniqueBoundSets) {
4464            auto set_node = my_data->setMap.find(set);
4465            if (set_node != my_data->setMap.end()) {
4466                set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4467            }
4468        }
4469        pCB->uniqueBoundSets.clear();
4470        pCB->destroyedSets.clear();
4471        pCB->updatedSets.clear();
4472        pCB->destroyedFramebuffers.clear();
4473        pCB->boundDescriptorSets.clear();
4474        pCB->waitedEvents.clear();
4475        pCB->semaphores.clear();
4476        pCB->events.clear();
4477        pCB->waitedEventsBeforeQueryReset.clear();
4478        pCB->queryToStateMap.clear();
4479        pCB->activeQueries.clear();
4480        pCB->startedQueries.clear();
4481        pCB->imageLayoutMap.clear();
4482        pCB->eventToStageMap.clear();
4483        pCB->drawData.clear();
4484        pCB->currentDrawData.buffers.clear();
4485        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4486        pCB->secondaryCommandBuffers.clear();
4487        pCB->dynamicOffsets.clear();
4488    }
4489}
4490
4491// Set PSO-related status bits for CB, including dynamic state set via PSO
4492static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4493    for (uint32_t i = 0; i < pPipe->cbStateCI.attachmentCount; i++) {
4494        if (0 != pPipe->pAttachments[i].colorWriteMask) {
4495            pCB->status |= CBSTATUS_COLOR_BLEND_WRITE_ENABLE;
4496        }
4497    }
4498    if (pPipe->dsStateCI.depthWriteEnable) {
4499        pCB->status |= CBSTATUS_DEPTH_WRITE_ENABLE;
4500    }
4501    if (pPipe->dsStateCI.stencilTestEnable) {
4502        pCB->status |= CBSTATUS_STENCIL_TEST_ENABLE;
4503    }
4504    // Account for any dynamic state not set via this PSO
4505    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
4506        pCB->status = CBSTATUS_ALL;
4507    } else {
4508        // First consider all state on
4509        // Then unset any state that's noted as dynamic in PSO
4510        // Finally OR that into CB statemask
4511        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4512        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
4513            switch (pPipe->dynStateCI.pDynamicStates[i]) {
4514            case VK_DYNAMIC_STATE_VIEWPORT:
4515                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4516                break;
4517            case VK_DYNAMIC_STATE_SCISSOR:
4518                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4519                break;
4520            case VK_DYNAMIC_STATE_LINE_WIDTH:
4521                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4522                break;
4523            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4524                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4525                break;
4526            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4527                psoDynStateMask &= ~CBSTATUS_BLEND_SET;
4528                break;
4529            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4530                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4531                break;
4532            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4533                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4534                break;
4535            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4536                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4537                break;
4538            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4539                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4540                break;
4541            default:
4542                // TODO : Flag error here
4543                break;
4544            }
4545        }
4546        pCB->status |= psoDynStateMask;
4547    }
4548}
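
// Worked example (illustrative): a pipeline whose pDynamicStates lists only
// VK_DYNAMIC_STATE_VIEWPORT yields psoDynStateMask == (CBSTATUS_ALL & ~CBSTATUS_VIEWPORT_SET),
// so after the OR above the CB still lacks CBSTATUS_VIEWPORT_SET until the app records an
// explicit vkCmdSetViewport().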
4549
4550// Print the last bound Gfx Pipeline
4551static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4552    VkBool32 skipCall = VK_FALSE;
4553    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4554    if (pCB) {
4555        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBoundPipeline);
4556        if (!pPipeTrav) {
4557            // nothing to print
4558        } else {
4559            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4560                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
4561                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
4562        }
4563    }
4564    return skipCall;
4565}
4566
4567// Print details of DS config to stdout
4568static VkBool32 printDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4569    VkBool32 skipCall = VK_FALSE;
4570    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4571    if (pCB && pCB->lastBoundDescriptorSet) {
        SET_NODE *pSet = getSetNode(my_data, pCB->lastBoundDescriptorSet);
        if (!pSet)
            return skipCall; // Unknown set: nothing to print
        DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pSet->pool);
        if (!pPool)
            return skipCall; // Unknown pool: nothing to print
4574        // Print out pool details
4575        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4576                            DRAWSTATE_NONE, "DS", "Details for pool %#" PRIxLEAST64 ".", (uint64_t)pPool->pool);
4577        string poolStr = vk_print_vkdescriptorpoolcreateinfo(&pPool->createInfo, " ");
4578        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4579                            DRAWSTATE_NONE, "DS", "%s", poolStr.c_str());
4580        // Print out set details
4581        char prefix[10];
4582        uint32_t index = 0;
4583        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4584                            DRAWSTATE_NONE, "DS", "Details for descriptor set %#" PRIxLEAST64 ".", (uint64_t)pSet->set);
4585        LAYOUT_NODE *pLayout = pSet->pLayout;
4586        // Print layout details
4587        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4588                            DRAWSTATE_NONE, "DS", "Layout #%u, (object %#" PRIxLEAST64 ") for DS %#" PRIxLEAST64 ".", index + 1,
4589                            (uint64_t)(pLayout->layout), (uint64_t)(pSet->set));
        snprintf(prefix, sizeof(prefix), "  [L%u] ", index);
        string DSLstr = vk_print_vkdescriptorsetlayoutcreateinfo(&pLayout->createInfo, prefix);
4592        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4593                            DRAWSTATE_NONE, "DS", "%s", DSLstr.c_str());
4594        index++;
4595        GENERIC_HEADER *pUpdate = pSet->pUpdateStructs;
4596        if (pUpdate) {
4597            skipCall |=
4598                log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4599                        DRAWSTATE_NONE, "DS", "Update Chain [UC] for descriptor set %#" PRIxLEAST64 ":", (uint64_t)pSet->set);
            snprintf(prefix, sizeof(prefix), "  [UC] ");
4601            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4602                                __LINE__, DRAWSTATE_NONE, "DS", "%s", dynamic_display(pUpdate, prefix).c_str());
4603            // TODO : If there is a "view" associated with this update, print CI for that view
4604        } else {
4605            if (0 != pSet->descriptorCount) {
4606                skipCall |=
4607                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4608                            DRAWSTATE_NONE, "DS", "No Update Chain for descriptor set %#" PRIxLEAST64
                                                  " which has %u descriptors (vkUpdateDescriptorSets() has not been called)",
4610                            (uint64_t)pSet->set, pSet->descriptorCount);
4611            } else {
4612                skipCall |=
4613                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4614                            DRAWSTATE_NONE, "DS", "FYI: No descriptors in descriptor set %#" PRIxLEAST64 ".", (uint64_t)pSet->set);
4615            }
4616        }
4617    }
4618    return skipCall;
4619}
4620
4621static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4622    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4623    if (pCB && pCB->cmds.size() > 0) {
4624        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4625                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
4626        vector<CMD_NODE> cmds = pCB->cmds;
4627        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4628            // TODO : Need to pass cb as srcObj here
4629            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4630                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4631        }
4632    } else {
4633        // Nothing to print
4634    }
4635}
4636
4637static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4638    VkBool32 skipCall = VK_FALSE;
4639    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4640        return skipCall;
4641    }
4642    skipCall |= printDSConfig(my_data, cb);
4643    skipCall |= printPipeline(my_data, cb);
4644    return skipCall;
4645}
4646
4647// Flags validation error if the associated call is made inside a render pass. The apiName
4648// routine should ONLY be called outside a render pass.
4649static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4650    VkBool32 inside = VK_FALSE;
4651    if (pCB->activeRenderPass) {
4652        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4653                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4654                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4655                         (uint64_t)pCB->activeRenderPass);
4656    }
4657    return inside;
4658}
4659
4660// Flags validation error if the associated call is made outside a render pass. The apiName
4661// routine should ONLY be called inside a render pass.
4662static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4663    VkBool32 outside = VK_FALSE;
4664    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4665        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4666         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4667        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4668                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4669                          "%s: This call must be issued inside an active render pass.", apiName);
4670    }
4671    return outside;
4672}
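
// Example (illustrative): transfer commands such as vkCmdCopyBuffer() are validated with
// insideRenderPass() because they must be issued *outside* a render pass, while draw
// commands such as vkCmdDraw() are validated with outsideRenderPass() because they must
// be issued *inside* one.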
4673
4674static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
4675
4676    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");
4677
4678    if (!globalLockInitialized) {
4679        loader_platform_thread_create_mutex(&globalLock);
4680        globalLockInitialized = 1;
4681    }
4682#if MTMERGE
4683    // Zero out memory property data
4684    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
4685#endif
4686}
4687
4688VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4689vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4690    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4691
4692    assert(chain_info->u.pLayerInfo);
4693    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4694    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4695    if (fpCreateInstance == NULL)
4696        return VK_ERROR_INITIALIZATION_FAILED;
4697
4698    // Advance the link info for the next element on the chain
4699    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4700
4701    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4702    if (result != VK_SUCCESS)
4703        return result;
4704
4705    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4706    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4707    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
4708
4709    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
4710                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4711
4712    init_core_validation(my_data, pAllocator);
4713
4714    ValidateLayerOrdering(*pCreateInfo);
4715
4716    return result;
4717}
4718
4719/* hook DestroyInstance to remove tableInstanceMap entry */
4720VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4721    // TODOSC : Shouldn't need any customization here
4722    dispatch_key key = get_dispatch_key(instance);
4723    // TBD: Need any locking this early, in case this function is called at the
4724    // same time by more than one thread?
4725    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4726    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4727    pTable->DestroyInstance(instance, pAllocator);
4728
4729    loader_platform_thread_lock_mutex(&globalLock);
4730    // Clean up logging callback, if any
4731    while (my_data->logging_callback.size() > 0) {
4732        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4733        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4734        my_data->logging_callback.pop_back();
4735    }
4736
4737    layer_debug_report_destroy_instance(my_data->report_data);
4738    delete my_data->instance_dispatch_table;
4739    layer_data_map.erase(key);
4740    loader_platform_thread_unlock_mutex(&globalLock);
4741    if (layer_data_map.empty()) {
4742        // Release mutex when destroying last instance.
4743        loader_platform_thread_delete_mutex(&globalLock);
4744        globalLockInitialized = 0;
4745    }
4746}
4747
4748static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4749    uint32_t i;
4750    // TBD: Need any locking, in case this function is called at the same time
4751    // by more than one thread?
4752    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4753    dev_data->device_extensions.wsi_enabled = false;
4754
4755    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4756    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4757    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4758    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4759    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4760    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4761    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4762
4763    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4764        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4765            dev_data->device_extensions.wsi_enabled = true;
4766    }
4767}
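
// Sketch (typical app code, not part of the layer): wsi_enabled is set when the app creates
// its device with the swapchain extension; 'device_create_info' below is a hypothetical
// VkDeviceCreateInfo:
//
//     const char *device_extensions[] = {VK_KHR_SWAPCHAIN_EXTENSION_NAME};
//     device_create_info.enabledExtensionCount = 1;
//     device_create_info.ppEnabledExtensionNames = device_extensions;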
4768
4769VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4770                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4771    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4772
4773    assert(chain_info->u.pLayerInfo);
4774    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4775    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4776    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4777    if (fpCreateDevice == NULL) {
4778        return VK_ERROR_INITIALIZATION_FAILED;
4779    }
4780
4781    // Advance the link info for the next element on the chain
4782    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4783
4784    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4785    if (result != VK_SUCCESS) {
4786        return result;
4787    }
4788
4789    loader_platform_thread_lock_mutex(&globalLock);
4790    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4791    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4792
4793    // Setup device dispatch table
4794    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4795    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4796
4797    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4798    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4799    // Get physical device limits for this device
4800    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
4801    uint32_t count;
4802    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4803    my_device_data->physDevProperties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, my_device_data->physDevProperties.queue_family_properties.data());
4806    // TODO: device limits should make sure these are compatible
4807    if (pCreateInfo->pEnabledFeatures) {
4808        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
4809    } else {
4810        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4811    }
4812    loader_platform_thread_unlock_mutex(&globalLock);
4813
4814    ValidateLayerOrdering(*pCreateInfo);
4815
4816    return result;
4817}
4818
// Forward declaration
4820static void deleteRenderPasses(layer_data *);
4821VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4822    // TODOSC : Shouldn't need any customization here
4823    dispatch_key key = get_dispatch_key(device);
4824    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4825    // Free all the memory
4826    loader_platform_thread_lock_mutex(&globalLock);
4827    deletePipelines(dev_data);
4828    deleteRenderPasses(dev_data);
4829    deleteCommandBuffers(dev_data);
4830    deletePools(dev_data);
4831    deleteLayouts(dev_data);
4832    dev_data->imageViewMap.clear();
4833    dev_data->imageMap.clear();
4834    dev_data->imageSubresourceMap.clear();
4835    dev_data->imageLayoutMap.clear();
4836    dev_data->bufferViewMap.clear();
4837    dev_data->bufferMap.clear();
4838    loader_platform_thread_unlock_mutex(&globalLock);
4839#if MTMERGE
4840    VkBool32 skipCall = VK_FALSE;
4841    loader_platform_thread_lock_mutex(&globalLock);
4842    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4843            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4844    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4845            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4846    print_mem_list(dev_data, device);
4847    printCBList(dev_data, device);
4848    skipCall = delete_cmd_buf_info_list(dev_data);
4849    // Report any memory leaks
4850    DEVICE_MEM_INFO *pInfo = NULL;
4851    if (dev_data->memObjMap.size() > 0) {
4852        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4853            pInfo = &(*ii).second;
4854            if (pInfo->allocInfo.allocationSize != 0) {
4855                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4856                skipCall |=
4857                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4858                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4859                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
4860                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
4861                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4862            }
4863        }
4864    }
4865    // Queues persist until device is destroyed
4866    delete_queue_info_list(dev_data);
4867    layer_debug_report_destroy_device(device);
4868    loader_platform_thread_unlock_mutex(&globalLock);
4869
4870#if DISPATCH_MAP_DEBUG
4871    fprintf(stderr, "Device: %p, key: %p\n", device, key);
4872#endif
4873    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4874    if (VK_FALSE == skipCall) {
4875        pDisp->DestroyDevice(device, pAllocator);
4876    }
4877#else
4878    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4879#endif
4880    delete dev_data->device_dispatch_table;
4881    layer_data_map.erase(key);
4882}
4883
4884#if MTMERGE
4885VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
4886vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
4887    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
4888    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
4889    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
4890    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
4891}
4892#endif
4893
4894static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4895
4896VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4897vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4898    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4899}
4900
4901VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4902vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4903    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4904}
4905
4906// TODO: Why does this exist - can we just use global?
4907static const VkLayerProperties cv_device_layers[] = {{
4908    "VK_LAYER_LUNARG_core_validation", VK_API_VERSION, 1, "LunarG Validation Layer",
4909}};
4910
4911VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4912                                                                                    const char *pLayerName, uint32_t *pCount,
4913                                                                                    VkExtensionProperties *pProperties) {
4914    if (pLayerName == NULL) {
4915        dispatch_key key = get_dispatch_key(physicalDevice);
4916        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4917        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4918    } else {
4919        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4920    }
4921}
4922
4923VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4924vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4925    /* draw_state physical device layers are the same as global */
4926    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4927}
4928
// Validate that the initial layout recorded for each IMAGE in this command buffer
// matches the current global layout of that IMAGE
VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (!pCB) // getCBNode() has already logged the invalid-CB error
        return skip_call;
    for (auto cb_image_data : pCB->imageLayoutMap) {
4937        VkImageLayout imageLayout;
4938        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4939            skip_call |=
4940                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4941                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4942                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4943        } else {
4944            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4945                // TODO: Set memory invalid which is in mem_tracker currently
4946            } else if (imageLayout != cb_image_data.second.initialLayout) {
4947                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4948                                     VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT,
4949                                     "DS", "Cannot submit cmd buffer using image with layout %s when "
4950                                           "first use is %s.",
4951                                     string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
4952            }
4953            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4954        }
4955    }
4956    return skip_call;
4957}
4958// Track which resources are in-flight by atomically incrementing their "in_use" count
4959VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4960    VkBool32 skip_call = VK_FALSE;
4961    for (auto drawDataElement : pCB->drawData) {
4962        for (auto buffer : drawDataElement.buffers) {
4963            auto buffer_data = my_data->bufferMap.find(buffer);
4964            if (buffer_data == my_data->bufferMap.end()) {
4965                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4966                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4967                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4968            } else {
4969                buffer_data->second.in_use.fetch_add(1);
4970            }
4971        }
4972    }
4973    for (auto set : pCB->uniqueBoundSets) {
4974        auto setNode = my_data->setMap.find(set);
4975        if (setNode == my_data->setMap.end()) {
4976            skip_call |=
4977                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4978                        (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4979                        "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4980        } else {
4981            setNode->second->in_use.fetch_add(1);
4982        }
4983    }
4984    for (auto semaphore : pCB->semaphores) {
4985        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4986        if (semaphoreNode == my_data->semaphoreMap.end()) {
4987            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4989                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4990                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4991        } else {
4992            semaphoreNode->second.in_use.fetch_add(1);
4993        }
4994    }
4995    for (auto event : pCB->events) {
4996        auto eventNode = my_data->eventMap.find(event);
4997        if (eventNode == my_data->eventMap.end()) {
4998            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
5000                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5001                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
5002        } else {
5003            eventNode->second.in_use.fetch_add(1);
5004        }
5005    }
5006    return skip_call;
5007}
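
// Design note: in_use is an atomic counter rather than a flag so that the same
// buffer/set/semaphore/event can be referenced by several in-flight submissions;
// the decrementResources() overloads below undo each fetch_add(1) once the
// corresponding fence retires.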
5008
5009void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5010    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5011    for (auto drawDataElement : pCB->drawData) {
5012        for (auto buffer : drawDataElement.buffers) {
5013            auto buffer_data = my_data->bufferMap.find(buffer);
5014            if (buffer_data != my_data->bufferMap.end()) {
5015                buffer_data->second.in_use.fetch_sub(1);
5016            }
5017        }
5018    }
5019    for (auto set : pCB->uniqueBoundSets) {
5020        auto setNode = my_data->setMap.find(set);
5021        if (setNode != my_data->setMap.end()) {
5022            setNode->second->in_use.fetch_sub(1);
5023        }
5024    }
5025    for (auto semaphore : pCB->semaphores) {
5026        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
5027        if (semaphoreNode != my_data->semaphoreMap.end()) {
5028            semaphoreNode->second.in_use.fetch_sub(1);
5029        }
5030    }
5031    for (auto event : pCB->events) {
5032        auto eventNode = my_data->eventMap.find(event);
5033        if (eventNode != my_data->eventMap.end()) {
5034            eventNode->second.in_use.fetch_sub(1);
5035        }
5036    }
5037    for (auto queryStatePair : pCB->queryToStateMap) {
5038        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
5039    }
5040    for (auto eventStagePair : pCB->eventToStageMap) {
5041        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
5042    }
5043}
5044
5045void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
5046    for (uint32_t i = 0; i < fenceCount; ++i) {
5047        auto fence_data = my_data->fenceMap.find(pFences[i]);
        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
            continue; // Skip this fence rather than abandoning the rest of the list
5050        fence_data->second.needsSignaled = false;
5051        fence_data->second.in_use.fetch_sub(1);
        // .data() (unlike &v[0]) is well-defined when the vector is empty
        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
5053        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
5054            decrementResources(my_data, cmdBuffer);
5055        }
5056    }
5057}
5058
5059void decrementResources(layer_data *my_data, VkQueue queue) {
5060    auto queue_data = my_data->queueMap.find(queue);
5061    if (queue_data != my_data->queueMap.end()) {
5062        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
5063            decrementResources(my_data, cmdBuffer);
5064        }
5065        queue_data->second.untrackedCmdBuffers.clear();
        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
5067    }
5068}
5069
5070void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
5071    if (queue == other_queue) {
5072        return;
5073    }
5074    auto queue_data = dev_data->queueMap.find(queue);
5075    auto other_queue_data = dev_data->queueMap.find(other_queue);
5076    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
5077        return;
5078    }
    // Distinct loop-variable name so it does not shadow the 'fence' parameter used below
    for (auto other_fence : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(other_fence);
    }
5082    if (fence != VK_NULL_HANDLE) {
5083        auto fence_data = dev_data->fenceMap.find(fence);
5084        if (fence_data == dev_data->fenceMap.end()) {
5085            return;
5086        }
5087        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5088            fence_data->second.cmdBuffers.push_back(cmdbuffer);
5089        }
5090        other_queue_data->second.untrackedCmdBuffers.clear();
5091    } else {
5092        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5093            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
5094        }
5095        other_queue_data->second.untrackedCmdBuffers.clear();
5096    }
5097}
5098
5099void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5100    auto queue_data = my_data->queueMap.find(queue);
5101    if (fence != VK_NULL_HANDLE) {
5102        vector<VkFence> prior_fences;
5103        auto fence_data = my_data->fenceMap.find(fence);
5104        if (fence_data == my_data->fenceMap.end()) {
5105            return;
5106        }
        // Clear the fence's stale CB list before migrating the queue's untracked CBs onto
        // it; clearing afterward would discard the CBs pushed in the loop below
        fence_data->second.cmdBuffers.clear();
        if (queue_data != my_data->queueMap.end()) {
            prior_fences = queue_data->second.lastFences;
            queue_data->second.lastFences.clear();
            queue_data->second.lastFences.push_back(fence);
            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
                fence_data->second.cmdBuffers.push_back(cmdbuffer);
            }
            queue_data->second.untrackedCmdBuffers.clear();
        }
5117        fence_data->second.priorFences = prior_fences;
5118        fence_data->second.needsSignaled = true;
5119        fence_data->second.queue = queue;
5120        fence_data->second.in_use.fetch_add(1);
5121        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5122            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5123            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5124                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5125                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
5126                }
5127                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
5128            }
5129        }
5130    } else {
5131        if (queue_data != my_data->queueMap.end()) {
5132            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5133                const VkSubmitInfo *submit = &pSubmits[submit_idx];
5134                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5135                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5136                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
5137                    }
5138                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
5139                }
5140            }
5141        }
5142    }
5143    if (queue_data != my_data->queueMap.end()) {
5144        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5145            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5146            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5147                // Add cmdBuffers to both the global set and queue set
5148                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5149                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
5150                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
5151                }
5152                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5153                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5154            }
5155        }
5156    }
5157}
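
// Sketch (illustrative): for a call such as
//
//     vkQueueSubmit(queue, 1, &submit_info, fence); // 'submit_info' is hypothetical
//
// with a valid fence, the primary CBs in submit_info plus their secondary CBs are recorded
// against that fence, so a later decrementResources(my_data, 1, &fence) can release every
// in_use reference taken at submit time.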
5158
5159bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5160    bool skip_call = false;
5161    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
5162        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5163        skip_call |=
5164            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5165                    __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Command Buffer %#" PRIx64 " is already in use and is not marked "
5166                                                             "for simultaneous use.",
5167                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
5168    }
5169    return skip_call;
5170}
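
// Example (illustrative): submitting the same command buffer again while it is still in
// globalInFlightCmdBuffers, when its vkBeginCommandBuffer() flags lacked
// VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, triggers the error above.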
5171
5172static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5173    bool skipCall = false;
5174    // Validate that cmd buffers have been updated
5175    if (CB_RECORDED != pCB->state) {
5176        if (CB_INVALID == pCB->state) {
5177            // Inform app of reason CB invalid
5178            bool causeReported = false;
5179            if (!pCB->destroyedSets.empty()) {
5180                std::stringstream set_string;
5181                for (auto set : pCB->destroyedSets)
5182                    set_string << " " << set;
5183
5184                skipCall |=
5185                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5186                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5187                            "You are submitting command buffer %#" PRIxLEAST64
5188                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
5189                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5190                causeReported = true;
5191            }
5192            if (!pCB->updatedSets.empty()) {
5193                std::stringstream set_string;
5194                for (auto set : pCB->updatedSets)
5195                    set_string << " " << set;
5196
5197                skipCall |=
5198                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5199                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5200                            "You are submitting command buffer %#" PRIxLEAST64
5201                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
5202                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5203                causeReported = true;
5204            }
5205            if (!pCB->destroyedFramebuffers.empty()) {
5206                std::stringstream fb_string;
5207                for (auto fb : pCB->destroyedFramebuffers)
5208                    fb_string << " " << fb;
5209
5210                skipCall |=
5211                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5212                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5213                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
5214                            "referenced framebuffers destroyed: %s",
5215                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
5216                causeReported = true;
5217            }
5218            // TODO : This is defensive programming to make sure an error is
5219            //  flagged if we hit this INVALID cmd buffer case and none of the
5220            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
5222            if (!causeReported) {
5223                skipCall |= log_msg(
5224                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5225                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
5229                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5230            }
5231        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5232            skipCall |=
5233                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5234                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5235                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5236                        (uint64_t)(pCB->commandBuffer));
5237        }
5238    }
5239    return skipCall;
5240}
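
// Illustrative sketch, not part of this layer: the DRAWSTATE_NO_END_COMMAND_BUFFER case
// above corresponds to an application submitting without vkEndCommandBuffer(). The
// expected ordering (cmd_buf and queue are assumed app-owned handles) is:
//
//     vkBeginCommandBuffer(cmd_buf, &begin_info);
//     // ... record commands ...
//     vkEndCommandBuffer(cmd_buf); // moves the CB out of the recording state
//     VkSubmitInfo submit = {};
//     submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     submit.commandBufferCount = 1;
//     submit.pCommandBuffers = &cmd_buf;
//     vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);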
5241
5242static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5243    // Track in-use for resources off of primary and any secondary CBs
5244    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
5245    if (!pCB->secondaryCommandBuffers.empty()) {
5246        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5247            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5248            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5249            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5251                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5252                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5253                        " but that buffer has subsequently been bound to "
5254                        "primary cmd buffer %#" PRIxLEAST64 ".",
5255                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5256                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5257            }
5258        }
5259    }
5260    // TODO : Verify if this also needs to be checked for secondary command
5261    //  buffers. If so, this block of code can move to
5262    //   validateCommandBufferState() function. vulkan GL106 filed to clarify
5263    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5264        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5265                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5266                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted %" PRIu64 " times.",
5268                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5269    }
5270    skipCall |= validateCommandBufferState(dev_data, pCB);
5271    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5272    // on device
5273    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5274    return skipCall;
5275}
5276
5277VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5278vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5279    VkBool32 skipCall = VK_FALSE;
5280    GLOBAL_CB_NODE *pCB = NULL;
5281    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5282    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5283    loader_platform_thread_lock_mutex(&globalLock);
5284#if MTMERGE
5285    // TODO : Need to track fence and clear mem references when fence clears
5286    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5287    MT_CB_INFO *pCBInfo = NULL;
5288    uint64_t fenceId = 0;
5289    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5290
5291    print_mem_list(dev_data, queue);
5292    printCBList(dev_data, queue);
5293    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5294        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5295        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5296            pCBInfo = get_cmd_buf_info(dev_data, submit->pCommandBuffers[i]);
5297            if (pCBInfo) {
5298                pCBInfo->fenceId = fenceId;
5299                pCBInfo->lastSubmittedFence = fence;
5300                pCBInfo->lastSubmittedQueue = queue;
5301                for (auto &function : pCBInfo->validate_functions) {
5302                    skipCall |= function();
5303                }
5304            }
5305        }
5306
5307        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5308            VkSemaphore sem = submit->pWaitSemaphores[i];
5309
5310            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5311                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skipCall |=
5313                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5314                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
5315                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5316                }
5317                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5318            }
5319        }
5320        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5321            VkSemaphore sem = submit->pSignalSemaphores[i];
5322
5323            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5324                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5326                                       VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
5327                                       "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5328                }
5329                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5330            }
5331        }
5332    }
5333#endif
5334    // First verify that fence is not in use
5335    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5336        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5337                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5338                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5339    }
5340    // Now verify each individual submit
5341    std::unordered_set<VkQueue> processed_other_queues;
5342    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5343        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5344        vector<VkSemaphore> semaphoreList;
5345        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5346            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5347            semaphoreList.push_back(semaphore);
5348            if (dev_data->semaphoreMap[semaphore].signaled) {
5349                dev_data->semaphoreMap[semaphore].signaled = 0;
5350            } else {
5351                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5352                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5353                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5354                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5355            }
5356            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5357            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5358                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5359                processed_other_queues.insert(other_queue);
5360            }
5361        }
5362        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5363            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5364            semaphoreList.push_back(semaphore);
5365            if (dev_data->semaphoreMap[semaphore].signaled) {
5366                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5367                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5368                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5369                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5370                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5371                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5372            } else {
5373                dev_data->semaphoreMap[semaphore].signaled = 1;
5374                dev_data->semaphoreMap[semaphore].queue = queue;
5375            }
5376        }
5377        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5378            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5379            pCB = getCBNode(dev_data, submit->pCommandBuffers[i]);
5380            pCB->semaphores = semaphoreList;
5381            pCB->submitCount++; // increment submit count
5382            skipCall |= validatePrimaryCommandBufferState(dev_data, pCB);
5383        }
5384    }
5385    // Update cmdBuffer-related data structs and mark fence in-use
5386    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5387    loader_platform_thread_unlock_mutex(&globalLock);
5388    if (VK_FALSE == skipCall)
5389        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5390#if MTMERGE
5391    loader_platform_thread_lock_mutex(&globalLock);
5392    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5393        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5394        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5395            VkSemaphore sem = submit->pWaitSemaphores[i];
5396
5397            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5398                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
5399            }
5400        }
5401    }
5402    loader_platform_thread_unlock_mutex(&globalLock);
5403#endif
5404    return result;
5405}
5406
5407#if MTMERGE
5408VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5409                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5410    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5411    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5412    // TODO : Track allocations and overall size here
5413    loader_platform_thread_lock_mutex(&globalLock);
5414    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5415    print_mem_list(my_data, device);
5416    loader_platform_thread_unlock_mutex(&globalLock);
5417    return result;
5418}
5419
5420VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5421vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5422    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5423
5424    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5425    // Before freeing a memory object, an application must ensure the memory object is no longer
5426    // in use by the device—for example by command buffers queued for execution. The memory need
5427    // not yet be unbound from all images and buffers, but any further use of those images or
5428    // buffers (on host or device) for anything other than destroying those objects will result in
5429    // undefined behavior.
5430
5431    loader_platform_thread_lock_mutex(&globalLock);
5432    freeMemObjInfo(my_data, device, mem, VK_FALSE);
5433    print_mem_list(my_data, device);
5434    printCBList(my_data, device);
5435    loader_platform_thread_unlock_mutex(&globalLock);
5436    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5437}
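
// Illustrative sketch, not part of this layer: to satisfy the spec language quoted in
// vkFreeMemory() above, an application waits for device work that references the
// allocation before freeing it (fence is an assumed handle signaled by the last
// submission that used mem):
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkFreeMemory(device, mem, NULL); // safe: no pending device access remains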
5438
5439VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5440    VkBool32 skipCall = VK_FALSE;
5441
5442    if (size == 0) {
5443        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5444        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5445                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5446                           "VkMapMemory: Attempting to map memory range of size zero");
5447    }
5448
5449    auto mem_element = my_data->memObjMap.find(mem);
5450    if (mem_element != my_data->memObjMap.end()) {
5451        // It is an application error to call VkMapMemory on an object that is already mapped
5452        if (mem_element->second.memRange.size != 0) {
5453            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5454                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5455                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5456        }
5457
5458        // Validate that offset + size is within object's allocationSize
5459        if (size == VK_WHOLE_SIZE) {
5460            if (offset >= mem_element->second.allocInfo.allocationSize) {
5461                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5462                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total allocation size %" PRIu64, offset,
5464                                   mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
5465            }
5466        } else {
5467            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5468                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5469                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total allocation size %" PRIu64, offset,
5471                                   size + offset, mem_element->second.allocInfo.allocationSize);
5472            }
5473        }
5474    }
5475    return skipCall;
5476}
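
// Illustrative sketch, not part of this layer: a map request that passes the range
// checks above keeps offset + size within allocationSize (handle names assumed;
// allocation is 256 bytes here):
//
//     void *data = NULL;
//     vkMapMemory(device, mem, 128 /*offset*/, 128 /*size*/, 0, &data); // maps second half
//     // ... write through data ...
//     vkUnmapMemory(device, mem);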
5477
5478void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5479    auto mem_element = my_data->memObjMap.find(mem);
5480    if (mem_element != my_data->memObjMap.end()) {
5481        MemRange new_range;
5482        new_range.offset = offset;
5483        new_range.size = size;
5484        mem_element->second.memRange = new_range;
5485    }
5486}
5487
5488VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5489    VkBool32 skipCall = VK_FALSE;
5490    auto mem_element = my_data->memObjMap.find(mem);
5491    if (mem_element != my_data->memObjMap.end()) {
5492        if (!mem_element->second.memRange.size) {
5493            // Valid Usage: memory must currently be mapped
5494            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5495                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5496                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5497        }
5498        mem_element->second.memRange.size = 0;
5499        if (mem_element->second.pData) {
5500            free(mem_element->second.pData);
5501            mem_element->second.pData = 0;
5502        }
5503    }
5504    return skipCall;
5505}
5506
5507static char NoncoherentMemoryFillValue = 0xb;
5508
5509void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5510    auto mem_element = my_data->memObjMap.find(mem);
5511    if (mem_element != my_data->memObjMap.end()) {
5512        mem_element->second.pDriverData = *ppData;
5513        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5514        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5515            mem_element->second.pData = 0;
5516        } else {
5517            if (size == VK_WHOLE_SIZE) {
5518                size = mem_element->second.allocInfo.allocationSize;
5519            }
5520            size_t convSize = (size_t)(size);
5521            mem_element->second.pData = malloc(2 * convSize);
5522            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5523            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5524        }
5525    }
5526}
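
// The shadow allocation in initializeAndTrackMemory() is a guard-band scheme: the
// pointer handed back to the app sits convSize / 2 bytes into a 2 * convSize buffer
// pre-filled with NoncoherentMemoryFillValue, so later checks can detect writes that
// strayed outside the mapped range. Illustrative app-side pairing, not part of this
// layer (mem is an assumed handle): non-HOST_COHERENT mappings must be flushed:
//
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = mem;
//     range.offset = 0;
//     range.size = VK_WHOLE_SIZE;
//     vkFlushMappedMemoryRanges(device, 1, &range);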
5527#endif
5528// Note: This function assumes that the global lock is held by the calling
5529// thread.
5530VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5531    VkBool32 skip_call = VK_FALSE;
5532    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5533    if (pCB) {
5534        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
5535            for (auto event : queryEventsPair.second) {
5536                if (my_data->eventMap[event].needsSignaled) {
5537                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5539                                         "Cannot get query results on queryPool %" PRIu64
5540                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
5541                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
5542                }
5543            }
5544        }
5545    }
5546    return skip_call;
5547}
5548// Remove given cmd_buffer from the global inFlight set.
//  Also, if given queue is valid, then remove the cmd_buffer from that queue's
//  inFlightCmdBuffers set. Finally, check all other queues and if given cmd_buffer
5551//  is still in flight on another queue, add it back into the global set.
5552// Note: This function assumes that the global lock is held by the calling
5553// thread.
5554static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
5555    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
5556    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5557    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5558        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
5559        for (auto q : dev_data->queues) {
5560            if ((q != queue) &&
5561                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
5562                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
5563                break;
5564            }
5565        }
5566    }
5567}
5568#if MTMERGE
5569static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
5570    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5571    VkBool32 skipCall = false;
5572    auto pFenceInfo = my_data->fenceMap.find(fence);
5573    if (pFenceInfo != my_data->fenceMap.end()) {
5574        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5577                skipCall |=
5578                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5579                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5580                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5581            }
5582            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5583                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5584                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5585                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5586                                    "acquire next image.",
5587                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5588            }
5589        } else {
5590            pFenceInfo->second.firstTimeFlag = VK_FALSE;
5591        }
5592    }
5593    return skipCall;
5594}
5595#endif
5596VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5597vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5598    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5599    VkBool32 skip_call = VK_FALSE;
5600#if MTMERGE
5601    // Verify fence status of submitted fences
5602    loader_platform_thread_lock_mutex(&globalLock);
5603    for (uint32_t i = 0; i < fenceCount; i++) {
5604        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
5605    }
5606    loader_platform_thread_unlock_mutex(&globalLock);
5607    if (skip_call)
5608        return VK_ERROR_VALIDATION_FAILED_EXT;
5609#endif
5610    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5611
5612    if (result == VK_SUCCESS) {
5613        loader_platform_thread_lock_mutex(&globalLock);
5614        // When we know that all fences are complete we can clean/remove their CBs
5615        if (waitAll || fenceCount == 1) {
5616            for (uint32_t i = 0; i < fenceCount; ++i) {
5617#if MTMERGE
5618                update_fence_tracking(dev_data, pFences[i]);
5619#endif
5620                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
5621                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
5622                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5623                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5624                }
5625            }
5626            decrementResources(dev_data, fenceCount, pFences);
5627        }
5628        // NOTE : Alternate case not handled here is when some fences have completed. In
5629        //  this case for app to guarantee which fences completed it will have to call
5630        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5631        loader_platform_thread_unlock_mutex(&globalLock);
5632    }
5633    if (VK_FALSE != skip_call)
5634        return VK_ERROR_VALIDATION_FAILED_EXT;
5635    return result;
5636}
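
// Illustrative sketch, not part of this layer: the typical per-frame pairing with the
// cleanup performed above is wait-then-reset (frame_fence is an assumed handle):
//
//     vkWaitForFences(device, 1, &frame_fence, VK_TRUE, UINT64_MAX);
//     vkResetFences(device, 1, &frame_fence); // back to unsignaled for the next submit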
5637
5638VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5639    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5640    bool skipCall = false;
5641    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5642#if MTMERGE
5643    loader_platform_thread_lock_mutex(&globalLock);
5644    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
5645    loader_platform_thread_unlock_mutex(&globalLock);
5646    if (skipCall)
5647        return result;
5648#endif
5649    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5650    VkBool32 skip_call = VK_FALSE;
5651    loader_platform_thread_lock_mutex(&globalLock);
5652    if (result == VK_SUCCESS) {
5653#if MTMERGE
5654        update_fence_tracking(dev_data, fence);
5655#endif
5656        auto fence_queue = dev_data->fenceMap[fence].queue;
5657        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
5658            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5659            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5660        }
5661        decrementResources(dev_data, 1, &fence);
5662    }
5663    loader_platform_thread_unlock_mutex(&globalLock);
5664    if (VK_FALSE != skip_call)
5665        return VK_ERROR_VALIDATION_FAILED_EXT;
5666    return result;
5667}
5668
5669VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5670vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
5671    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5672    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5673    loader_platform_thread_lock_mutex(&globalLock);
5674    dev_data->queues.push_back(*pQueue);
5675    QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5676    pQNode->device = device;
5677#if MTMERGE
5678    pQNode->lastRetiredId = 0;
5679    pQNode->lastSubmittedId = 0;
5680#endif
5681    loader_platform_thread_unlock_mutex(&globalLock);
5682}
5683
5684VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
5685    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5686    decrementResources(dev_data, queue);
5687    VkBool32 skip_call = VK_FALSE;
5688    loader_platform_thread_lock_mutex(&globalLock);
    // Iterate over a local copy of the set since members are erased from the real set as we go
5690    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
5691    for (auto cmdBuffer : local_cb_set) {
5692        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5693        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
5694    }
5695    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5696    loader_platform_thread_unlock_mutex(&globalLock);
5697    if (VK_FALSE != skip_call)
5698        return VK_ERROR_VALIDATION_FAILED_EXT;
5699    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5700#if MTMERGE
5701    if (VK_SUCCESS == result) {
5702        loader_platform_thread_lock_mutex(&globalLock);
5703        retire_queue_fences(dev_data, queue);
5704        loader_platform_thread_unlock_mutex(&globalLock);
5705    }
5706#endif
5707    return result;
5708}
5709
5710VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5711    VkBool32 skip_call = VK_FALSE;
5712    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5713    loader_platform_thread_lock_mutex(&globalLock);
5714    for (auto queue : dev_data->queues) {
5715        decrementResources(dev_data, queue);
5716        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5717            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
5718            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5719        }
5720    }
5721    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5722        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5723    }
5724    dev_data->globalInFlightCmdBuffers.clear();
5725    loader_platform_thread_unlock_mutex(&globalLock);
5726    if (VK_FALSE != skip_call)
5727        return VK_ERROR_VALIDATION_FAILED_EXT;
5728    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5729#if MTMERGE
5730    if (VK_SUCCESS == result) {
5731        loader_platform_thread_lock_mutex(&globalLock);
5732        retire_device_fences(dev_data, device);
5733        loader_platform_thread_unlock_mutex(&globalLock);
5734    }
5735#endif
5736    return result;
5737}
5738
5739VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5740    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5741    bool skipCall = false;
5742    loader_platform_thread_lock_mutex(&globalLock);
5743    if (dev_data->fenceMap[fence].in_use.load()) {
5744        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5745                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5746                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5747    }
5748#if MTMERGE
5749    delete_fence_info(dev_data, fence);
5750    auto item = dev_data->fenceMap.find(fence);
5751    if (item != dev_data->fenceMap.end()) {
5752        dev_data->fenceMap.erase(item);
5753    }
5754#endif
5755    loader_platform_thread_unlock_mutex(&globalLock);
5756    if (!skipCall)
5757        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5758}
5759
5760VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5761vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5762    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5763    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5764    loader_platform_thread_lock_mutex(&globalLock);
5765    auto item = dev_data->semaphoreMap.find(semaphore);
5766    if (item != dev_data->semaphoreMap.end()) {
5767        if (item->second.in_use.load()) {
5768            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5769                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5770                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5771        }
5772        dev_data->semaphoreMap.erase(semaphore);
5773    }
5774    loader_platform_thread_unlock_mutex(&globalLock);
5775    // TODO : Clean up any internal data structures using this obj.
5776}
5777
5778VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5779    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5780    bool skip_call = false;
5781    loader_platform_thread_lock_mutex(&globalLock);
5782    auto event_data = dev_data->eventMap.find(event);
5783    if (event_data != dev_data->eventMap.end()) {
5784        if (event_data->second.in_use.load()) {
5785            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
5787                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5788                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5789        }
5790        dev_data->eventMap.erase(event_data);
5791    }
5792    loader_platform_thread_unlock_mutex(&globalLock);
5793    if (!skip_call)
5794        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5795    // TODO : Clean up any internal data structures using this obj.
5796}
5797
5798VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5799vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5800    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5801        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5802    // TODO : Clean up any internal data structures using this obj.
5803}
5804
5805VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5806                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5807                                                     VkQueryResultFlags flags) {
5808    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5809    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5810    GLOBAL_CB_NODE *pCB = nullptr;
5811    loader_platform_thread_lock_mutex(&globalLock);
5812    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5813        pCB = getCBNode(dev_data, cmdBuffer);
5814        for (auto queryStatePair : pCB->queryToStateMap) {
5815            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5816        }
5817    }
5818    VkBool32 skip_call = VK_FALSE;
5819    for (uint32_t i = 0; i < queryCount; ++i) {
5820        QueryObject query = {queryPool, firstQuery + i};
5821        auto queryElement = queriesInFlight.find(query);
5822        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5825        // Available and in flight
5826        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5827            queryToStateElement->second) {
5828            for (auto cmdBuffer : queryElement->second) {
5829                pCB = getCBNode(dev_data, cmdBuffer);
5830                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5831                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5832                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5833                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5834                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5835                                         (uint64_t)(queryPool), firstQuery + i);
5836                } else {
5837                    for (auto event : queryEventElement->second) {
5838                        dev_data->eventMap[event].needsSignaled = true;
5839                    }
5840                }
5841            }
5842            // Unavailable and in flight
5843        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5844                   !queryToStateElement->second) {
5845            // TODO : Can there be the same query in use by multiple command buffers in flight?
5846            bool make_available = false;
5847            for (auto cmdBuffer : queryElement->second) {
5848                pCB = getCBNode(dev_data, cmdBuffer);
5849                make_available |= pCB->queryToStateMap[query];
5850            }
5851            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5852                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5853                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5854                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5855                                     (uint64_t)(queryPool), firstQuery + i);
5856            }
5857            // Unavailable
5858        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5859            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5860                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5861                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5862                                 (uint64_t)(queryPool), firstQuery + i);
            // Uninitialized
5864        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5865            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5866                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5867                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is uninitialized.",
5868                                 (uint64_t)(queryPool), firstQuery + i);
5869        }
5870    }
5871    loader_platform_thread_unlock_mutex(&globalLock);
5872    if (skip_call)
5873        return VK_ERROR_VALIDATION_FAILED_EXT;
5874    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5875                                                                flags);
5876}
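
// Illustrative sketch, not part of this layer: a retrieval that avoids both the
// "in flight" and "unavailable" errors above requests 64-bit results with the WAIT
// bit (pool is an assumed handle, QUERY_COUNT an assumed constant):
//
//     uint64_t results[QUERY_COUNT];
//     vkGetQueryPoolResults(device, pool, 0, QUERY_COUNT, sizeof(results), results,
//                           sizeof(uint64_t), VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);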
5877
5878VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5879    VkBool32 skip_call = VK_FALSE;
5880    auto buffer_data = my_data->bufferMap.find(buffer);
5881    if (buffer_data == my_data->bufferMap.end()) {
5882        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5883                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5884                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5885    } else {
5886        if (buffer_data->second.in_use.load()) {
5887            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5888                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5889                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5890        }
5891    }
5892    return skip_call;
5893}
5894
5895VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5896vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5897    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5898    VkBool32 skipCall = VK_FALSE;
5899    loader_platform_thread_lock_mutex(&globalLock);
5900#if MTMERGE
5901    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
5902    if (item != dev_data->bufferBindingMap.end()) {
5903        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5904        dev_data->bufferBindingMap.erase(item);
5905    }
5906#endif
5907    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
5908        loader_platform_thread_unlock_mutex(&globalLock);
5909        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5910        loader_platform_thread_lock_mutex(&globalLock);
5911    }
5912    dev_data->bufferMap.erase(buffer);
5913    loader_platform_thread_unlock_mutex(&globalLock);
5914}
5915
5916VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5917vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5918    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5919    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5920    loader_platform_thread_lock_mutex(&globalLock);
5921    auto item = dev_data->bufferViewMap.find(bufferView);
5922    if (item != dev_data->bufferViewMap.end()) {
5923        dev_data->bufferViewMap.erase(item);
5924    }
5925    loader_platform_thread_unlock_mutex(&globalLock);
5926}
5927
5928VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5929    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5930    VkBool32 skipCall = VK_FALSE;
5931#if MTMERGE
5932    loader_platform_thread_lock_mutex(&globalLock);
5933    auto item = dev_data->imageBindingMap.find((uint64_t)image);
5934    if (item != dev_data->imageBindingMap.end()) {
5935        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5936        dev_data->imageBindingMap.erase(item);
5937    }
5938    loader_platform_thread_unlock_mutex(&globalLock);
5939#endif
5940    if (VK_FALSE == skipCall)
5941        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5942
5943    loader_platform_thread_lock_mutex(&globalLock);
5944    const auto& entry = dev_data->imageMap.find(image);
5945    if (entry != dev_data->imageMap.end()) {
5946        // Clear any memory mapping for this image
5947        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
5948        if (mem_entry != dev_data->memObjMap.end())
5949            mem_entry->second.image = VK_NULL_HANDLE;
5950
5951        // Remove image from imageMap
5952        dev_data->imageMap.erase(entry);
5953    }
5954    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5955    if (subEntry != dev_data->imageSubresourceMap.end()) {
5956        for (const auto& pair : subEntry->second) {
5957            dev_data->imageLayoutMap.erase(pair);
5958        }
5959        dev_data->imageSubresourceMap.erase(subEntry);
5960    }
5961    loader_platform_thread_unlock_mutex(&globalLock);
5962}
5963#if MTMERGE
5964VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5965                                  VkDebugReportObjectTypeEXT object_type) {
5966    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
5969                       other_handle);
5970    } else {
        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
5973                       other_handle);
5974    }
5975}
5976
5977VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5978                               VkDebugReportObjectTypeEXT object_type) {
5979    VkBool32 skip_call = false;
5980
5981    for (auto range : ranges) {
5982        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
5983            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5984            continue;
5985        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
5986            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5987            continue;
5988        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5989    }
5990    return skip_call;
5991}
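
// Worked example of the masking above (values assumed; the mask is only valid because
// bufferImageGranularity is required to be a power of two): with granularity 0x400,
// ~(0x400 - 1) clears the low 10 bits, so a tracked range ending at 0x7FF rounds down
// to page 0x400 while a new range starting at 0x800 stays at page 0x800; since
// 0x400 < 0x800 the ranges land on different granularity pages and no aliasing error
// is reported.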
5992
5993VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5994                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
5995                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
5996    MEMORY_RANGE range;
5997    range.handle = handle;
5998    range.memory = mem;
5999    range.start = memoryOffset;
6000    range.end = memoryOffset + memRequirements.size - 1;
6001    ranges.push_back(range);
6002    return validate_memory_range(dev_data, other_ranges, range, object_type);
6003}
6004
6005VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6006vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
6007    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6008    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6009    loader_platform_thread_lock_mutex(&globalLock);
6010    // Track objects tied to memory
6011    uint64_t buffer_handle = (uint64_t)(buffer);
6012    VkBool32 skipCall =
6013        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
6014    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
6015    {
6016        VkMemoryRequirements memRequirements;
        // Call down the dispatch chain directly rather than re-entering through the loader trampoline
        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
6019        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
6020                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
6021                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
6022    }
6023    print_mem_list(dev_data, device);
6024    loader_platform_thread_unlock_mutex(&globalLock);
6025    if (VK_FALSE == skipCall) {
6026        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
6027    }
6028    return result;
6029}
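
// Illustrative sketch, not part of this layer: the usual flow queries requirements
// before binding and respects the returned alignment (buffer and mem are assumed
// handles):
//
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);
//     // memoryOffset passed to vkBindBufferMemory must be a multiple of reqs.alignment
//     vkBindBufferMemory(device, buffer, mem, 0 /*memoryOffset*/);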
6030
6031VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6032vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
6033    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6034    // TODO : What to track here?
6035    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
6036    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
6037}
6038
6039VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6040vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
6041    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6042    // TODO : What to track here?
6043    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
6044    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
6045}
6046#endif
6047VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6048vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
6049    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6050        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
6051    // TODO : Clean up any internal data structures using this obj.
6052}
6053
6054VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6055vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6056    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6057        ->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
6058    // TODO : Clean up any internal data structures using this obj.
6059}
6060
6061VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6062vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6063    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
6064    // TODO : Clean up any internal data structures using this obj.
6065}
6066
6067VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6068vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6069    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6070        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6071    // TODO : Clean up any internal data structures using this obj.
6072}
6073
6074VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6075vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6076    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
6077    // TODO : Clean up any internal data structures using this obj.
6078}
6079
6080VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6081vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6082    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6083        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6084    // TODO : Clean up any internal data structures using this obj.
6085}
6086
6087VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6088vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6089    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6090        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
6091    // TODO : Clean up any internal data structures using this obj.
6092}
6093
6094VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6095vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6096    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6097
6098    bool skip_call = false;
6099    loader_platform_thread_lock_mutex(&globalLock);
6100    for (uint32_t i = 0; i < commandBufferCount; i++) {
6101#if MTMERGE
6102        skip_call |= delete_cmd_buf_info(dev_data, commandPool, pCommandBuffers[i]);
6103#endif
6104        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
6105            skip_call |=
6106                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6107                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6108                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
6109                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
6110        }
6111        // Delete CB information structure, and remove from commandBufferMap
6112        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
6113        if (cb != dev_data->commandBufferMap.end()) {
6114            // reset prior to delete for data clean-up
6115            resetCB(dev_data, (*cb).second->commandBuffer);
6116            delete (*cb).second;
6117            dev_data->commandBufferMap.erase(cb);
6118        }
6119
6120        // Remove commandBuffer reference from commandPoolMap
6121        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
6122    }
6123#if MTMERGE
6124    printCBList(dev_data, device);
6125#endif
6126    loader_platform_thread_unlock_mutex(&globalLock);
6127
6128    if (!skip_call)
6129        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6130}
6131
6132VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6133                                                                   const VkAllocationCallbacks *pAllocator,
6134                                                                   VkCommandPool *pCommandPool) {
6135    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6136
6137    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6138
6139    if (VK_SUCCESS == result) {
6140        loader_platform_thread_lock_mutex(&globalLock);
6141        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6142        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6143        loader_platform_thread_unlock_mutex(&globalLock);
6144    }
6145    return result;
6146}
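
// Illustrative sketch, not part of this layer: creating a pool whose command buffers
// may be individually reset (queue_family and cmd_pool are assumed names):
//
//     VkCommandPoolCreateInfo pool_info = {};
//     pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     pool_info.queueFamilyIndex = queue_family;
//     vkCreateCommandPool(device, &pool_info, NULL, &cmd_pool);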
6147
6148VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6149                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6150
6151    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6152    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6153    if (result == VK_SUCCESS) {
6154        loader_platform_thread_lock_mutex(&globalLock);
6155        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
6156        loader_platform_thread_unlock_mutex(&globalLock);
6157    }
6158    return result;
6159}
6160
6161VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
6162    VkBool32 skipCall = VK_FALSE;
6163    auto pool_data = dev_data->commandPoolMap.find(commandPool);
6164    if (pool_data != dev_data->commandPoolMap.end()) {
6165        for (auto cmdBuffer : pool_data->second.commandBuffers) {
6166            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
6167                skipCall |=
6168                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
6169                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                            "Cannot reset or destroy command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
6171                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
6172            }
6173        }
6174    }
6175    return skipCall;
6176}
6177
6178// Destroy commandPool along with all of the commandBuffers allocated from that pool
6179VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6180vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6181    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6182    bool commandBufferComplete = false;
6183    bool skipCall = false;
6184    loader_platform_thread_lock_mutex(&globalLock);
6185#if MTMERGE
6186    // Verify that command buffers in pool are complete (not in-flight)
6187    // MTMTODO : Merge this with code below (separate *NotInUse() call)
6188    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6189         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
        commandBufferComplete = false;
        skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
6192        if (VK_FALSE == commandBufferComplete) {
6193            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6194                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6195                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
6196                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
6197                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
6198        }
6199    }
#endif
    loader_platform_thread_unlock_mutex(&globalLock);

    // Check for in-flight command buffers while the pool's tracking state is still intact;
    // once the pool entry is erased below, validateCommandBuffersNotInUse() cannot see its command buffers.
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
        return;

    loader_platform_thread_lock_mutex(&globalLock);
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
            delete (*del_cb).second;                  // delete CB info structure
            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer from cbMap
            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
                poolCb); // Remove CB reference from commandPoolMap's list
        }
    }
    dev_data->commandPoolMap.erase(commandPool);
    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
#if MTMERGE
    // MTMTODO : Merge this cleanup with the removal loop above. The pool entry has already been
    // erased at this point, so look it up with find() -- operator[] would re-insert an empty entry.
    loader_platform_thread_lock_mutex(&globalLock);
    auto pool_it = dev_data->commandPoolMap.find(commandPool);
    if (pool_it != dev_data->commandPoolMap.end()) {
        // Remove command buffers from command buffer map
        auto item = pool_it->second.commandBuffers.begin();
        while (item != pool_it->second.commandBuffers.end()) {
            auto del_item = item++;
            delete_cmd_buf_info(dev_data, commandPool, *del_item);
        }
        dev_data->commandPoolMap.erase(commandPool);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
6232}
6233
6234VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6235vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6236    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6237    bool commandBufferComplete = false;
6238    bool skipCall = false;
6239    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6240#if MTMERGE
6241    // MTMTODO : Merge this with *NotInUse() call below
6242    loader_platform_thread_lock_mutex(&globalLock);
6243    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6244    // Verify that CB's in pool are complete (not in-flight)
6245    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        skipCall |= checkCBCompleted(dev_data, (*it), &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Resetting CB %p before it has completed. You must check CB "
                                "flag before calling vkResetCommandPool().",
                                (*it));
6253        } else {
6254            // Clear memory references at this point.
6255            skipCall |= clear_cmd_buf_and_mem_references(dev_data, (*it));
6256        }
6257        ++it;
6258    }
6259    loader_platform_thread_unlock_mutex(&globalLock);
6260#endif
6261    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
6262        return VK_ERROR_VALIDATION_FAILED_EXT;
6263
6264    if (!skipCall)
6265        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
6266
6267    // Reset all of the CBs allocated from this pool
6268    if (VK_SUCCESS == result) {
6269        loader_platform_thread_lock_mutex(&globalLock);
6270        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6271        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6272            resetCB(dev_data, (*it));
6273            ++it;
6274        }
6275        loader_platform_thread_unlock_mutex(&globalLock);
6276    }
6277    return result;
6278}
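
// Illustrative sequence (not part of the layer) that would trip the in-use check above:
// resetting a pool while one of its command buffers is still pending on a queue. Assumes
// valid `device`, `queue`, `pool`, and a recorded `cmdBuf` allocated from `pool`:
//
//     VkSubmitInfo submit = {};
//     submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     submit.commandBufferCount = 1;
//     submit.pCommandBuffers = &cmdBuf;
//     vkQueueSubmit(queue, 1, &submit, VK_NULL_HANDLE);
//     vkResetCommandPool(device, pool, 0);   // error: cmdBuf may still be in flight
//     // Correct: vkQueueWaitIdle(queue) (or wait on a fence) before resetting the pool.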
6279
6280VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6281    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6282    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6283    bool skipCall = false;
6284    loader_platform_thread_lock_mutex(&globalLock);
6285    for (uint32_t i = 0; i < fenceCount; ++i) {
6286#if MTMERGE
6287        // Reset fence state in fenceCreateInfo structure
6288        // MTMTODO : Merge with code below
6289        auto fence_item = dev_data->fenceMap.find(pFences[i]);
6290        if (fence_item != dev_data->fenceMap.end()) {
6291            // Validate fences in SIGNALED state
6292            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
6293                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "Fence %#" PRIxLEAST64 " submitted to vkResetFences in UNSIGNALED STATE", (uint64_t)pFences[i]);
6297            } else {
6298                fence_item->second.createInfo.flags =
6299                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
6300            }
6301        }
6302#endif
        auto fence_node = dev_data->fenceMap.find(pFences[i]);
        if (fence_node != dev_data->fenceMap.end() && fence_node->second.in_use.load()) {
6304            skipCall |=
6305                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6306                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6307                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
6308        }
6309    }
6310    loader_platform_thread_unlock_mutex(&globalLock);
6311    if (!skipCall)
6312        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6313    return result;
6314}
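
// Illustrative sequence (not part of the layer): the MTMERGE block above warns when a fence
// reaches vkResetFences while already unsignaled. Assumes a valid `device`:
//
//     VkFenceCreateInfo fenceInfo = {};
//     fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
//     fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;   // starts signaled
//     VkFence fence;
//     vkCreateFence(device, &fenceInfo, NULL, &fence);
//     vkResetFences(device, 1, &fence);   // OK: clears the signaled state tracked above
//     vkResetFences(device, 1, &fence);   // warning: fence is already UNSIGNALED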
6315
6316VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6317vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6318    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6319#if MTMERGE
6320    // MTMTODO : Merge with code below
6321    loader_platform_thread_lock_mutex(&globalLock);
6322    auto item = dev_data->fbMap.find(framebuffer);
6323    if (item != dev_data->fbMap.end()) {
6324        dev_data->fbMap.erase(framebuffer);
6325    }
6326    loader_platform_thread_unlock_mutex(&globalLock);
6327#endif
    // Hold the lock across the whole walk so CB invalidation and the map erase are atomic
    loader_platform_thread_lock_mutex(&globalLock);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
            }
        }
        dev_data->frameBufferMap.erase(framebuffer);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
6342    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6343}
6344
6345VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6346vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6347    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6348    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6349    loader_platform_thread_lock_mutex(&globalLock);
6350    dev_data->renderPassMap.erase(renderPass);
6351    dev_data->passMap.erase(renderPass);
6352    loader_platform_thread_unlock_mutex(&globalLock);
6353}
6354
6355VkBool32 validate_queue_family_indices(layer_data *dev_data, const char *function_name, const uint32_t count,
6356                                       const uint32_t *indices) {
6357    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < count; i++) {
6359        if (indices[i] >= dev_data->physDevProperties.queue_family_properties.size()) {
6360            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6361                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                "%s has a QueueFamilyIndex greater than or equal to the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER
6363                                ") for this device.",
6364                                function_name, dev_data->physDevProperties.queue_family_properties.size());
6365        }
6366    }
6367    return skipCall;
6368}
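
// Worked example (illustrative): on a device exposing two queue families, a VkBufferCreateInfo
// with sharingMode = VK_SHARING_MODE_CONCURRENT and pQueueFamilyIndices = {0, 3} trips the
// check above, since index 3 >= queue_family_properties.size() (2).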
6369
6370VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6371vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6372    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6373    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6374    bool skipCall = validate_queue_family_indices(dev_data, "vkCreateBuffer", pCreateInfo->queueFamilyIndexCount,
6375                                                  pCreateInfo->pQueueFamilyIndices);
6376    if (!skipCall) {
6377        result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6378    }
6379
6380    if (VK_SUCCESS == result) {
6381        loader_platform_thread_lock_mutex(&globalLock);
6382#if MTMERGE
6383        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
6384#endif
6385        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6386        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
6387        dev_data->bufferMap[*pBuffer].in_use.store(0);
6388        loader_platform_thread_unlock_mutex(&globalLock);
6389    }
6390    return result;
6391}
6392
6393VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6394                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6395    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6396    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6397    if (VK_SUCCESS == result) {
6398        loader_platform_thread_lock_mutex(&globalLock);
6399        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6400#if MTMERGE
6401        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6402        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6403        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
6404                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
6405                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6406#endif
6407        loader_platform_thread_unlock_mutex(&globalLock);
6408    }
6409    return result;
6410}
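
// Illustrative usage (not part of the layer): the usage-flag check above requires the source
// buffer to carry a texel-buffer usage bit. Assumes a valid `device`, format support, and that
// memory is bound before the view is created:
//
//     VkBufferCreateInfo bufInfo = {};
//     bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     bufInfo.size = 4096;
//     bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // required for a buffer view
//     bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     VkBuffer buffer;
//     vkCreateBuffer(device, &bufInfo, NULL, &buffer);
//     // ...allocate and bind memory, then:
//     VkBufferViewCreateInfo viewInfo = {};
//     viewInfo.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
//     viewInfo.buffer = buffer;
//     viewInfo.format = VK_FORMAT_R32_SFLOAT;
//     viewInfo.range = VK_WHOLE_SIZE;
//     VkBufferView view;
//     vkCreateBufferView(device, &viewInfo, NULL, &view);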
6411
6412VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6413vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6414    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6415    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6416    bool skipCall = validate_queue_family_indices(dev_data, "vkCreateImage", pCreateInfo->queueFamilyIndexCount,
6417                                                  pCreateInfo->pQueueFamilyIndices);
6418    if (!skipCall) {
6419        result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6420    }
6421
6422    if (VK_SUCCESS == result) {
6423        loader_platform_thread_lock_mutex(&globalLock);
6424#if MTMERGE
6425        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
6426#endif
6427        IMAGE_LAYOUT_NODE image_node;
6428        image_node.layout = pCreateInfo->initialLayout;
6429        image_node.format = pCreateInfo->format;
6430        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6431        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6432        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6433        dev_data->imageLayoutMap[subpair] = image_node;
6434        loader_platform_thread_unlock_mutex(&globalLock);
6435    }
6436    return result;
6437}
6438
6439static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6440    /* expects globalLock to be held by caller */
6441
6442    auto image_node_it = dev_data->imageMap.find(image);
6443    if (image_node_it != dev_data->imageMap.end()) {
6444        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6445         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6446         * the actual values.
6447         */
6448        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6449            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6450        }
6451
6452        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6453            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6454        }
6455    }
6456}
6457
6458// Return the correct layer/level counts if the caller used the special
6459// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6460static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6461                                         VkImage image) {
6462    /* expects globalLock to be held by caller */
6463
6464    *levels = range.levelCount;
6465    *layers = range.layerCount;
6466    auto image_node_it = dev_data->imageMap.find(image);
6467    if (image_node_it != dev_data->imageMap.end()) {
6468        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6469            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6470        }
6471        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6472            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6473        }
6474    }
6475}
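
// Worked example (illustrative): for an image created with mipLevels = 10 and arrayLayers = 6,
// a range of { baseMipLevel = 2, levelCount = VK_REMAINING_MIP_LEVELS,
//              baseArrayLayer = 1, layerCount = VK_REMAINING_ARRAY_LAYERS }
// resolves to levelCount = 10 - 2 = 8 and layerCount = 6 - 1 = 5.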
6476
6477VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6478                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6479    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6480    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6481    if (VK_SUCCESS == result) {
6482        loader_platform_thread_lock_mutex(&globalLock);
6483        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6484        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6485        dev_data->imageViewMap[*pView] = localCI;
6486#if MTMERGE
6487        // Validate that img has correct usage flags set
6488        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
6489                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6490                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                   VK_FALSE, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
6492#endif
6493        loader_platform_thread_unlock_mutex(&globalLock);
6494    }
6495    return result;
6496}
6497
6498VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6499vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6500    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6501    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6502    if (VK_SUCCESS == result) {
6503        loader_platform_thread_lock_mutex(&globalLock);
6504        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
6505#if MTMERGE
        // MTMERGE holdover: zero only the mem_tracker-era fields, which are assumed to occupy
        // the leading sizeof(MT_FENCE_INFO) bytes of FENCE_NODE
        memset(pFN, 0, sizeof(MT_FENCE_INFO));
6507        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
6508        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
6509            pFN->firstTimeFlag = VK_TRUE;
6510        }
6511#endif
6512        pFN->in_use.store(0);
6513        loader_platform_thread_unlock_mutex(&globalLock);
6514    }
6515    return result;
6516}
6517
6518// TODO handle pipeline caches
6519VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6520                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6521    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6522    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6523    return result;
6524}
6525
6526VKAPI_ATTR void VKAPI_CALL
6527vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6528    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6529    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6530}
6531
6532VKAPI_ATTR VkResult VKAPI_CALL
6533vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6534    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6535    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6536    return result;
6537}
6538
6539VKAPI_ATTR VkResult VKAPI_CALL
6540vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6541    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6542    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6543    return result;
6544}
6545
6546VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6547vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6548                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6549                          VkPipeline *pPipelines) {
6550    VkResult result = VK_SUCCESS;
6551    // TODO What to do with pipelineCache?
6552    // The order of operations here is a little convoluted but gets the job done
6553    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6554    //  2. Create state is then validated (which uses flags setup during shadowing)
6555    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6556    VkBool32 skipCall = VK_FALSE;
6557    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6558    vector<PIPELINE_NODE *> pPipeNode(count);
6559    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6560
6561    uint32_t i = 0;
6562    loader_platform_thread_lock_mutex(&globalLock);
6563
6564    for (i = 0; i < count; i++) {
6565        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
6566        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6567    }
6568
6569    if (VK_FALSE == skipCall) {
6570        loader_platform_thread_unlock_mutex(&globalLock);
6571        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6572                                                                          pPipelines);
6573        loader_platform_thread_lock_mutex(&globalLock);
6574        for (i = 0; i < count; i++) {
6575            pPipeNode[i]->pipeline = pPipelines[i];
6576            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6577        }
6578        loader_platform_thread_unlock_mutex(&globalLock);
6579    } else {
6580        for (i = 0; i < count; i++) {
6581            if (pPipeNode[i]) {
6582                // If we allocated a pipeNode, need to clean it up here
6583                delete[] pPipeNode[i]->pVertexBindingDescriptions;
6584                delete[] pPipeNode[i]->pVertexAttributeDescriptions;
6585                delete[] pPipeNode[i]->pAttachments;
6586                delete pPipeNode[i];
6587            }
6588        }
6589        loader_platform_thread_unlock_mutex(&globalLock);
6590        return VK_ERROR_VALIDATION_FAILED_EXT;
6591    }
6592    return result;
6593}
6594
6595VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6596vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6597                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6598                         VkPipeline *pPipelines) {
6599    VkResult result = VK_SUCCESS;
6600    VkBool32 skipCall = VK_FALSE;
6601
6602    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6603    vector<PIPELINE_NODE *> pPipeNode(count);
6604    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6605
6606    uint32_t i = 0;
6607    loader_platform_thread_lock_mutex(&globalLock);
6608    for (i = 0; i < count; i++) {
6609        // TODO: Verify compute stage bits
6610
6611        // Create and initialize internal tracking data structure
6612        pPipeNode[i] = new PIPELINE_NODE;
6613        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6614
6615        // TODO: Add Compute Pipeline Verification
6616        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6617    }
6618
6619    if (VK_FALSE == skipCall) {
6620        loader_platform_thread_unlock_mutex(&globalLock);
6621        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6622                                                                         pPipelines);
6623        loader_platform_thread_lock_mutex(&globalLock);
6624        for (i = 0; i < count; i++) {
6625            pPipeNode[i]->pipeline = pPipelines[i];
6626            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6627        }
6628        loader_platform_thread_unlock_mutex(&globalLock);
6629    } else {
6630        for (i = 0; i < count; i++) {
6631            // Clean up any locally allocated data structures
6632            delete pPipeNode[i];
6633        }
6634        loader_platform_thread_unlock_mutex(&globalLock);
6635        return VK_ERROR_VALIDATION_FAILED_EXT;
6636    }
6637    return result;
6638}
6639
6640VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6641                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6642    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6643    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6644    if (VK_SUCCESS == result) {
6645        loader_platform_thread_lock_mutex(&globalLock);
6646        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6647        loader_platform_thread_unlock_mutex(&globalLock);
6648    }
6649    return result;
6650}
6651
6652VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6653vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6654                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6655    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6656    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6657    if (VK_SUCCESS == result) {
6658        // TODOSC : Capture layout bindings set
6659        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
6660        if (NULL == pNewNode) {
6661            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
6662                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6663                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
6664                return VK_ERROR_VALIDATION_FAILED_EXT;
6665        }
6666        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
6667        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
6668        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
6669               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
6670        // g++ does not like reserve with size 0
6671        if (pCreateInfo->bindingCount)
6672            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
6673        uint32_t totalCount = 0;
6674        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
            // A successful emplace() records the binding -> index mapping; failure means a duplicate binding number
            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
                            DRAWSTATE_INVALID_LAYOUT, "DS",
                            "duplicated binding number in VkDescriptorSetLayoutBinding"))
                    return VK_ERROR_VALIDATION_FAILED_EXT;
            }
6684            totalCount += pCreateInfo->pBindings[i].descriptorCount;
6685            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
6686                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
6687                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
6688                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
6689                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
6690            }
6691        }
6692        pNewNode->layout = *pSetLayout;
6693        pNewNode->startIndex = 0;
6694        if (totalCount > 0) {
6695            pNewNode->descriptorTypes.resize(totalCount);
6696            pNewNode->stageFlags.resize(totalCount);
6697            uint32_t offset = 0;
6698            uint32_t j = 0;
6699            VkDescriptorType dType;
6700            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6701                dType = pCreateInfo->pBindings[i].descriptorType;
6702                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
6703                    pNewNode->descriptorTypes[offset + j] = dType;
6704                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
6705                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
6706                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
6707                        pNewNode->dynamicDescriptorCount++;
6708                    }
6709                }
6710                offset += j;
6711            }
6712            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
6713        } else { // no descriptors
6714            pNewNode->endIndex = 0;
6715        }
        // Record the new layout node in the global descriptor set layout map
6717        loader_platform_thread_lock_mutex(&globalLock);
6718        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
6719        loader_platform_thread_unlock_mutex(&globalLock);
6720    }
6721    return result;
6722}
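
// Worked example (illustrative) of the flattened indexing built above: a layout with
// binding 0 (descriptorCount = 3, UNIFORM_BUFFER) and binding 1 (descriptorCount = 2,
// UNIFORM_BUFFER_DYNAMIC) yields totalCount = 5, descriptorTypes[0..2] = UNIFORM_BUFFER,
// descriptorTypes[3..4] = UNIFORM_BUFFER_DYNAMIC, dynamicDescriptorCount = 2,
// startIndex = 0, and endIndex = 4.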
6723
6724static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6725                                     const char *caller_name) {
6726    bool skipCall = false;
6727    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
6728        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6729                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                 "exceeds this device's maxPushConstantsSize of %u.",
6731                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
6732    }
6733    return skipCall;
6734}
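
// Worked example (illustrative): with maxPushConstantsSize = 128, a push constant range of
// { offset = 64, size = 128 } fails because offset + size = 192 > 128, while
// { offset = 64, size = 64 } is accepted (64 + 64 = 128 <= 128).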
6735
6736VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6737                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6738    bool skipCall = false;
6739    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6740    uint32_t i = 0;
6741    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6742        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6743                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
6744        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
6745            skipCall |=
6746                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6747                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
6748                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6749                        i, pCreateInfo->pPushConstantRanges[i].size);
6750        }
6751        // TODO : Add warning if ranges overlap
6752    }
6753    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6754    if (VK_SUCCESS == result) {
6755        loader_platform_thread_lock_mutex(&globalLock);
6756        // TODOSC : Merge capture of the setLayouts per pipeline
6757        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6758        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6759        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6760            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6761        }
6762        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6763        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6764            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6765        }
6766        loader_platform_thread_unlock_mutex(&globalLock);
6767    }
6768    return result;
6769}
6770
6771VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6772vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6773                       VkDescriptorPool *pDescriptorPool) {
6774    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6775    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6776    if (VK_SUCCESS == result) {
        // Record this pool in the global descriptor pool map
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
6780                    (uint64_t)*pDescriptorPool))
6781            return VK_ERROR_VALIDATION_FAILED_EXT;
6782        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6783        if (NULL == pNewNode) {
6784            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6785                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6786                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6787                return VK_ERROR_VALIDATION_FAILED_EXT;
6788        } else {
6789            loader_platform_thread_lock_mutex(&globalLock);
6790            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6791            loader_platform_thread_unlock_mutex(&globalLock);
6792        }
6793    } else {
        // TODO : Anything to clean up if pool creation fails?
6795    }
6796    return result;
6797}
6798
6799VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6800vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6801    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6802    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6803    if (VK_SUCCESS == result) {
6804        loader_platform_thread_lock_mutex(&globalLock);
6805        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6806        loader_platform_thread_unlock_mutex(&globalLock);
6807    }
6808    return result;
6809}
6810
6811VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6812vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6813    VkBool32 skipCall = VK_FALSE;
6814    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6815
6816    loader_platform_thread_lock_mutex(&globalLock);
6817    // Verify that requested descriptorSets are available in pool
6818    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6819    if (!pPoolNode) {
6820        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6821                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
6822                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
6823                            (uint64_t)pAllocateInfo->descriptorPool);
6824    } else { // Make sure pool has all the available descriptors before calling down chain
6825        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
6826                                                             pAllocateInfo->pSetLayouts);
6827    }
6828    loader_platform_thread_unlock_mutex(&globalLock);
6829    if (skipCall)
6830        return VK_ERROR_VALIDATION_FAILED_EXT;
6831    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6832    if (VK_SUCCESS == result) {
6833        loader_platform_thread_lock_mutex(&globalLock);
6834        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6835        if (pPoolNode) {
6836            if (pAllocateInfo->descriptorSetCount == 0) {
6837                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6838                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
6839                        "AllocateDescriptorSets called with 0 count");
6840            }
6841            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
6842                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6843                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
6844                        (uint64_t)pDescriptorSets[i]);
6845                // Create new set node and add to head of pool nodes
6846                SET_NODE *pNewNode = new SET_NODE;
6847                if (NULL == pNewNode) {
6848                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6849                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6850                                DRAWSTATE_OUT_OF_MEMORY, "DS",
6851                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()"))
6852                        return VK_ERROR_VALIDATION_FAILED_EXT;
6853                } else {
6854                    // TODO : Pool should store a total count of each type of Descriptor available
6855                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
6857                    // Insert set at head of Set LL for this pool
6858                    pNewNode->pNext = pPoolNode->pSets;
6859                    pNewNode->in_use.store(0);
6860                    pPoolNode->pSets = pNewNode;
6861                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
6862                    if (NULL == pLayout) {
6863                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6864                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
6865                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
6866                                    "Unable to find set layout node for layout %#" PRIxLEAST64
6867                                    " specified in vkAllocateDescriptorSets() call",
6868                                    (uint64_t)pAllocateInfo->pSetLayouts[i]))
6869                            return VK_ERROR_VALIDATION_FAILED_EXT;
6870                    }
6871                    pNewNode->pLayout = pLayout;
6872                    pNewNode->pool = pAllocateInfo->descriptorPool;
6873                    pNewNode->set = pDescriptorSets[i];
                    pNewNode->descriptorCount = (pLayout && pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
                    if (pNewNode->descriptorCount) {
                        // Allocate descriptorCount pointers; descriptorArraySize is the byte size used by memset
                        size_t descriptorArraySize = sizeof(GENERIC_HEADER *) * pNewNode->descriptorCount;
                        pNewNode->ppDescriptors = new GENERIC_HEADER *[pNewNode->descriptorCount];
                        memset(pNewNode->ppDescriptors, 0, descriptorArraySize);
6879                    }
6880                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
6881                }
6882            }
6883        }
6884        loader_platform_thread_unlock_mutex(&globalLock);
6885    }
6886    return result;
6887}
6888
6889VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6890vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6891    VkBool32 skipCall = VK_FALSE;
6892    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6893    // Make sure that no sets being destroyed are in-flight
6894    loader_platform_thread_lock_mutex(&globalLock);
6895    for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
6897    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6898    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6899        // Can't Free from a NON_FREE pool
6900        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6901                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6902                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6903                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6904    }
6905    loader_platform_thread_unlock_mutex(&globalLock);
6906    if (VK_FALSE != skipCall)
6907        return VK_ERROR_VALIDATION_FAILED_EXT;
6908    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6909    if (VK_SUCCESS == result) {
6910        // For each freed descriptor add it back into the pool as available
6911        loader_platform_thread_lock_mutex(&globalLock);
        if (pPoolNode) { // Pool node must exist to return descriptors to it
            for (uint32_t i = 0; i < count; ++i) {
                SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
                invalidateBoundCmdBuffers(dev_data, pSet);
                LAYOUT_NODE *pLayout = pSet->pLayout;
                uint32_t typeIndex = 0, poolSizeCount = 0;
                for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
                    typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
                    poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
                    pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
                }
            }
        }
6923        loader_platform_thread_unlock_mutex(&globalLock);
6924    }
6925    // TODO : Any other clean-up or book-keeping to do here?
6926    return result;
6927}
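
// Worked example (illustrative) of the accounting above: freeing a set whose layout has one
// binding with descriptorCount = 4 of VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER returns those four
// descriptors to the pool, i.e. availableDescriptorTypeCount[UNIFORM_BUFFER] += 4.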
6928
6929VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6930vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6931                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6932    // dsUpdate will return VK_TRUE only if a bailout error occurs, so we want to call down tree when update returns VK_FALSE
6933    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6934    loader_platform_thread_lock_mutex(&globalLock);
6935#if MTMERGE
6936    // MTMTODO : Merge this in with existing update code below and handle descriptor copies case
6937    uint32_t j = 0;
6938    for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
6939        if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
6940            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6941                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].images.push_back(
6942                    pDescriptorWrites[i].pImageInfo[j].imageView);
6943            }
6944        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
6945            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6946                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6947                    dev_data->bufferViewMap[pDescriptorWrites[i].pTexelBufferView[j]].buffer);
6948            }
6949        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
6950                   pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6951            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6952                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6953                    pDescriptorWrites[i].pBufferInfo[j].buffer);
6954            }
6955        }
6956    }
6957#endif
6958    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6959    loader_platform_thread_unlock_mutex(&globalLock);
6960    if (!rtn) {
6961        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6962                                                              pDescriptorCopies);
6963    }
6964}
6965
6966VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6967vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6968    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6969    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6970    if (VK_SUCCESS == result) {
6971        loader_platform_thread_lock_mutex(&globalLock);
6972        for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6973#if MTMERGE
6974            add_cmd_buf_info(dev_data, pCreateInfo->commandPool, pCommandBuffer[i]);
6975#endif
6976            // Validate command pool
6977            if (dev_data->commandPoolMap.find(pCreateInfo->commandPool) != dev_data->commandPoolMap.end()) {
6978                // Add command buffer to its commandPool map
6979#if !MTMERGE
6980                dev_data->commandPoolMap[pCreateInfo->commandPool].commandBuffers.push_back(pCommandBuffer[i]);
6981#endif
6982                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6983                // Add command buffer to map
6984                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6985                resetCB(dev_data, pCommandBuffer[i]);
6986                pCB->createInfo = *pCreateInfo;
6987                pCB->device = device;
6988            }
6989        }
6990#if MTMERGE
6991        printCBList(dev_data, device);
6992#endif
6993        loader_platform_thread_unlock_mutex(&globalLock);
6994    }
6995    return result;
6996}
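
// Illustrative usage (not part of the layer): each allocation is validated against the pool
// tracked in commandPoolMap. Assumes a valid `device` and `pool`:
//
//     VkCommandBufferAllocateInfo allocInfo = {};
//     allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
//     allocInfo.commandPool = pool;
//     allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
//     allocInfo.commandBufferCount = 1;
//     VkCommandBuffer cmdBuf;
//     vkAllocateCommandBuffers(device, &allocInfo, &cmdBuf);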
6997
6998VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6999vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
7000    VkBool32 skipCall = VK_FALSE;
7001    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7002    loader_platform_thread_lock_mutex(&globalLock);
7003    // Validate command buffer level
7004    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7005    if (pCB) {
7006#if MTMERGE
7007        bool commandBufferComplete = false;
7008        // MTMTODO : Merge this with code below
7009        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        skipCall |= checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7011
7012        if (!commandBufferComplete) {
7013            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7014                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7015                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
7016                                "You must check CB flag before this call.",
7017                                commandBuffer);
7018        }
7019#endif
7020        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7021            // Secondary Command Buffer
7022            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
7023            if (!pInfo) {
7024                skipCall |=
7025                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7026                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7027                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
7028                            reinterpret_cast<void *>(commandBuffer));
7029            } else {
7030                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderPass should NOT be null for a Secondary CB
7032                        skipCall |= log_msg(
7033                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7034                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7035                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
7036                            reinterpret_cast<void *>(commandBuffer));
7037                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
7039                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7040                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7041                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
7042                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
7043                                                  "valid framebuffer parameter is specified.",
7044                                            reinterpret_cast<void *>(commandBuffer));
7045                    } else {
7046                        string errorString = "";
7047                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
7048                        if (fbNode != dev_data->frameBufferMap.end()) {
7049                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
7050                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
                                // renderPass that framebuffer was created with must be compatible with local renderPass
7054                                skipCall |=
7055                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7056                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7057                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
7058                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
7059                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
7060                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
7061                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
7062                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
7063                            }
7064                            // Connect this framebuffer to this cmdBuffer
7065                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
7066                        }
7067                    }
7068                }
7069                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
7070                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
7071                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7072                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7073                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7074                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7075                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
7077                                        "support precise occlusion queries.",
7078                                        reinterpret_cast<void *>(commandBuffer));
7079                }
7080            }
7081            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
7082                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
7083                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
7084                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
7085                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7086                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7087                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must have a subpass index (%d) "
7089                                            "that is less than the number of subpasses (%d).",
7090                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
7091                    }
7092                }
7093            }
7094        }
7095        if (CB_RECORDING == pCB->state) {
7096            skipCall |=
7097                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7098                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7099                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
7100                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7101                        (uint64_t)commandBuffer);
7102        } else if (CB_RECORDED == pCB->state) {
7103            VkCommandPool cmdPool = pCB->createInfo.commandPool;
7104            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7105                skipCall |=
7106                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7107                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7108                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
7109                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
7110                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7111                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7112            }
7113            resetCB(dev_data, commandBuffer);
7114        }
7115        // Set updated state here in case implicit reset occurs above
7116        pCB->state = CB_RECORDING;
7117        pCB->beginInfo = *pBeginInfo;
7118        if (pCB->beginInfo.pInheritanceInfo) {
7119            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
7120            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
7121        }
7122    } else {
7123        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7124                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7125                            "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
7126    }
7127    loader_platform_thread_unlock_mutex(&globalLock);
7128    if (VK_FALSE != skipCall) {
7129        return VK_ERROR_VALIDATION_FAILED_EXT;
7130    }
7131    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
7132#if MTMERGE
7133    loader_platform_thread_lock_mutex(&globalLock);
7134    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7135    loader_platform_thread_unlock_mutex(&globalLock);
7136#endif
7137    return result;
7138}
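
// Illustrative sketch (not compiled, per the #if 0 guard): an application-side
// vkBeginCommandBuffer() call on a secondary command buffer that satisfies the
// inheritance-info checks above. The handles (secondaryCB, renderPass,
// framebuffer) are hypothetical placeholders assumed to be valid.
#if 0
    VkCommandBufferInheritanceInfo inheritanceInfo = {};
    inheritanceInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritanceInfo.renderPass = renderPass;   // required when the CONTINUE bit is set
    inheritanceInfo.subpass = 0;               // must be < subpassCount of renderPass
    inheritanceInfo.framebuffer = framebuffer; // optional, may be VK_NULL_HANDLE
    inheritanceInfo.occlusionQueryEnable = VK_FALSE;
    inheritanceInfo.queryFlags = 0;            // PRECISE bit would require occlusionQueryEnable

    VkCommandBufferBeginInfo beginInfo = {};
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    beginInfo.pInheritanceInfo = &inheritanceInfo;
    vkBeginCommandBuffer(secondaryCB, &beginInfo);
#endif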
7139
7140VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7141    VkBool32 skipCall = VK_FALSE;
7142    VkResult result = VK_SUCCESS;
7143    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7144    loader_platform_thread_lock_mutex(&globalLock);
7145    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7146    if (pCB) {
7147        if (pCB->state != CB_RECORDING) {
7148            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7149        }
7150        for (auto query : pCB->activeQueries) {
7151            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7152                                DRAWSTATE_INVALID_QUERY, "DS",
7153                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
7154                                (uint64_t)(query.pool), query.index);
7155        }
7156    }
7157    if (VK_FALSE == skipCall) {
7158        loader_platform_thread_unlock_mutex(&globalLock);
7159        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7160        loader_platform_thread_lock_mutex(&globalLock);
7161        if (VK_SUCCESS == result) {
7162            pCB->state = CB_RECORDED;
7163            // Reset CB status flags
7164            pCB->status = 0;
7165            printCB(dev_data, commandBuffer);
7166        }
7167    } else {
7168        result = VK_ERROR_VALIDATION_FAILED_EXT;
7169    }
7170    loader_platform_thread_unlock_mutex(&globalLock);
7171    return result;
7172}
7173
7174VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7175vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7176    VkBool32 skipCall = VK_FALSE;
7177    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7178    loader_platform_thread_lock_mutex(&globalLock);
7179#if MTMERGE
7180    bool commandBufferComplete = false;
7181    // Verify that CB is complete (not in-flight)
7182    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7183    if (!commandBufferComplete) {
7184        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7185                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7186                            "Resetting CB %p before it has completed. You must check command buffer "
7187                            "fence status before calling vkResetCommandBuffer().",
7188                            commandBuffer);
7189    }
7190    // Clear memory references at this point.
7191    skipCall |= clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7192#endif
7193    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7194    VkCommandPool cmdPool = pCB->createInfo.commandPool;
7195    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7196        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7197                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7198                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
7199                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7200                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7201    }
7202    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7203        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7204                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7205                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7206                            reinterpret_cast<uint64_t>(commandBuffer));
7207    }
7208    loader_platform_thread_unlock_mutex(&globalLock);
7209    if (skipCall != VK_FALSE)
7210        return VK_ERROR_VALIDATION_FAILED_EXT;
7211    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7212    if (VK_SUCCESS == result) {
7213        loader_platform_thread_lock_mutex(&globalLock);
7214        resetCB(dev_data, commandBuffer);
7215        loader_platform_thread_unlock_mutex(&globalLock);
7216    }
7217    return result;
7218}
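
// Illustrative sketch (not compiled): vkResetCommandBuffer() (and the implicit
// reset performed by vkBeginCommandBuffer() above) is only legal when the
// originating pool was created with the RESET bit. Names (device,
// queueFamilyIndex, pool, commandBuffer) are hypothetical placeholders.
#if 0
    VkCommandPoolCreateInfo poolInfo = {};
    poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
    poolInfo.queueFamilyIndex = queueFamilyIndex;
    vkCreateCommandPool(device, &poolInfo, NULL, &pool);
    // ... allocate commandBuffer from pool, record, and wait for it to complete ...
    vkResetCommandBuffer(commandBuffer, 0); // legal: pool has the RESET bit
#endif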
7219#if MTMERGE
7220// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7221//    need to account for that mem now having binding to given commandBuffer
7222#endif
7223VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7224vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7225    VkBool32 skipCall = VK_FALSE;
7226    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7227    loader_platform_thread_lock_mutex(&globalLock);
7228#if MTMERGE
7229    // MTMTODO : Pulled this dead code in during merge, figure out what to do with it
7230#if 0 // FIXME: NEED TO FIX THE FOLLOWING CODE AND REMOVE THIS #if 0
7231    // TODO : If memory bound to pipeline, then need to tie that mem to commandBuffer
7232    if (getPipeline(pipeline)) {
7233        MT_CB_INFO *pCBInfo = get_cmd_buf_info(my_data, commandBuffer);
7234        if (pCBInfo) {
7235            pCBInfo->pipelines[pipelineBindPoint] = pipeline;
7236        }
7237    }
7238    else {
7239        char str[1024]; sprintf(str, "Attempt to bind Pipeline %p that doesn't exist!", (void*)pipeline);
7240        layerCbMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, pipeline, __LINE__, MEMTRACK_INVALID_OBJECT, (char *) "DS", (char *) str);
7241    }
7242#endif
7243#endif
7244    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7245    if (pCB) {
7246        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7247        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7248            skipCall |=
7249                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7250                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7251                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7252                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7253        }
7254
7255        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7256        if (pPN) {
7257            pCB->lastBoundPipeline = pipeline;
7258            set_cb_pso_status(pCB, pPN);
7259            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7260        } else {
7261            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7262                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7263                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7264        }
7265    }
7266    loader_platform_thread_unlock_mutex(&globalLock);
7267    if (VK_FALSE == skipCall)
7268        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7269}
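
// Illustrative sketch (not compiled): the render-pass check above flags a
// compute bind issued between vkCmdBeginRenderPass/vkCmdEndRenderPass. Compute
// work belongs outside the pass; cb, computePipeline, and the dispatch sizes
// are hypothetical placeholders.
#if 0
    vkCmdEndRenderPass(cb); // leave the graphics pass first
    vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);
    vkCmdDispatch(cb, groupCountX, groupCountY, 1);
#endif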
7270
7271VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7272vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7273    VkBool32 skipCall = VK_FALSE;
7274    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7275    loader_platform_thread_lock_mutex(&globalLock);
7276    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7277    if (pCB) {
7278        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7279        pCB->status |= CBSTATUS_VIEWPORT_SET;
7280        pCB->viewports.resize(viewportCount);
7281        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
7282    }
7283    loader_platform_thread_unlock_mutex(&globalLock);
7284    if (VK_FALSE == skipCall)
7285        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7286}
7287
7288VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7289vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7290    VkBool32 skipCall = VK_FALSE;
7291    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7292    loader_platform_thread_lock_mutex(&globalLock);
7293    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7294    if (pCB) {
7295        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7296        pCB->status |= CBSTATUS_SCISSOR_SET;
7297        pCB->scissors.resize(scissorCount);
7298        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
7299    }
7300    loader_platform_thread_unlock_mutex(&globalLock);
7301    if (VK_FALSE == skipCall)
7302        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7303}
7304
7305VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7306    VkBool32 skipCall = VK_FALSE;
7307    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7308    loader_platform_thread_lock_mutex(&globalLock);
7309    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7310    if (pCB) {
7311        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7312        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7313        pCB->lineWidth = lineWidth;
7314    }
7315    loader_platform_thread_unlock_mutex(&globalLock);
7316    if (VK_FALSE == skipCall)
7317        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7318}
7319
7320VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7321vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7322    VkBool32 skipCall = VK_FALSE;
7323    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7324    loader_platform_thread_lock_mutex(&globalLock);
7325    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7326    if (pCB) {
7327        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7328        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7329        pCB->depthBiasConstantFactor = depthBiasConstantFactor;
7330        pCB->depthBiasClamp = depthBiasClamp;
7331        pCB->depthBiasSlopeFactor = depthBiasSlopeFactor;
7332    }
7333    loader_platform_thread_unlock_mutex(&globalLock);
7334    if (VK_FALSE == skipCall)
7335        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7336                                                         depthBiasSlopeFactor);
7337}
7338
7339VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7340    VkBool32 skipCall = VK_FALSE;
7341    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7342    loader_platform_thread_lock_mutex(&globalLock);
7343    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7344    if (pCB) {
7345        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7346        pCB->status |= CBSTATUS_BLEND_SET;
7347        memcpy(pCB->blendConstants, blendConstants, 4 * sizeof(float));
7348    }
7349    loader_platform_thread_unlock_mutex(&globalLock);
7350    if (VK_FALSE == skipCall)
7351        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7352}
7353
7354VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7355vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7356    VkBool32 skipCall = VK_FALSE;
7357    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7358    loader_platform_thread_lock_mutex(&globalLock);
7359    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7360    if (pCB) {
7361        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7362        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7363        pCB->minDepthBounds = minDepthBounds;
7364        pCB->maxDepthBounds = maxDepthBounds;
7365    }
7366    loader_platform_thread_unlock_mutex(&globalLock);
7367    if (VK_FALSE == skipCall)
7368        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7369}
7370
7371VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7372vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7373    VkBool32 skipCall = VK_FALSE;
7374    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7375    loader_platform_thread_lock_mutex(&globalLock);
7376    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7377    if (pCB) {
7378        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7379        if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
7380            pCB->front.compareMask = compareMask;
7381        }
7382        if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
7383            pCB->back.compareMask = compareMask;
7384        }
7385        /* TODO: Do we need to track front and back separately? */
7386        /* TODO: We aren't capturing the faceMask, do we need to? */
7387        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7388    }
7389    loader_platform_thread_unlock_mutex(&globalLock);
7390    if (VK_FALSE == skipCall)
7391        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7392}
7393
7394VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7395vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7396    VkBool32 skipCall = VK_FALSE;
7397    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7398    loader_platform_thread_lock_mutex(&globalLock);
7399    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7400    if (pCB) {
7401        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7402        if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
7403            pCB->front.writeMask = writeMask;
7404        }
7405        if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
7406            pCB->back.writeMask = writeMask;
7407        }
7408        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7409    }
7410    loader_platform_thread_unlock_mutex(&globalLock);
7411    if (VK_FALSE == skipCall)
7412        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7413}
7414
7415VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7416vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7417    VkBool32 skipCall = VK_FALSE;
7418    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7419    loader_platform_thread_lock_mutex(&globalLock);
7420    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7421    if (pCB) {
7422        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7423        if (faceMask & VK_STENCIL_FACE_FRONT_BIT) {
7424            pCB->front.reference = reference;
7425        }
7426        if (faceMask & VK_STENCIL_FACE_BACK_BIT) {
7427            pCB->back.reference = reference;
7428        }
7429        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7430    }
7431    loader_platform_thread_unlock_mutex(&globalLock);
7432    if (VK_FALSE == skipCall)
7433        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7434}
7435
7436VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7437vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7438                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7439                        const uint32_t *pDynamicOffsets) {
7440    VkBool32 skipCall = VK_FALSE;
7441    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7442    loader_platform_thread_lock_mutex(&globalLock);
7443#if MTMERGE
7444    // MTMTODO : Merge this with code below
7445    auto cb_data = dev_data->cbMap.find(commandBuffer);
7446    if (cb_data != dev_data->cbMap.end()) {
7447        std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second.activeDescriptorSets;
7448        if (activeDescriptorSets.size() < (setCount + firstSet)) {
7449            activeDescriptorSets.resize(setCount + firstSet);
7450        }
7451        for (uint32_t i = 0; i < setCount; ++i) {
7452            activeDescriptorSets[i + firstSet] = pDescriptorSets[i];
7453        }
7454    }
7455    // TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
7456#endif
7457    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7458    if (pCB) {
7459        if (pCB->state == CB_RECORDING) {
7460            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7461            uint32_t totalDynamicDescriptors = 0;
7462            string errorString = "";
7463            uint32_t lastSetIndex = firstSet + setCount - 1;
7464            if (lastSetIndex >= pCB->boundDescriptorSets.size())
7465                pCB->boundDescriptorSets.resize(lastSetIndex + 1);
7466            VkDescriptorSet oldFinalBoundSet = pCB->boundDescriptorSets[lastSetIndex];
7467            for (uint32_t i = 0; i < setCount; i++) {
7468                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7469                if (pSet) {
7470                    pCB->uniqueBoundSets.insert(pDescriptorSets[i]);
7471                    pSet->boundCmdBuffers.insert(commandBuffer);
7472                    pCB->lastBoundDescriptorSet = pDescriptorSets[i];
7473                    pCB->lastBoundPipelineLayout = layout;
7474                    pCB->boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7475                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7476                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7477                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7478                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7479                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7480                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7481                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7482                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7483                                            "DS %#" PRIxLEAST64
7484                                            " bound but it was never updated. You may want to either update it or not bind it.",
7485                                            (uint64_t)pDescriptorSets[i]);
7486                    }
7487                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7488                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7489                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7490                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7491                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7492                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
7493                                            "pipelineLayout due to: %s",
7494                                            i + firstSet, errorString.c_str());
7495                    }
7496                    if (pSet->pLayout->dynamicDescriptorCount) {
7497                        // First make sure we won't overstep bounds of pDynamicOffsets array
7498                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7499                            skipCall |=
7500                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7501                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7502                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7503                                        "descriptorSet #%u (%#" PRIxLEAST64
7504                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7505                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7506                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7507                                        (dynamicOffsetCount - totalDynamicDescriptors));
7508                        } else { // Validate and store dynamic offsets with the set
7509                            // Validate Dynamic Offset Minimums
7510                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7511                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7512                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7513                                    if (vk_safe_modulo(
7514                                            pDynamicOffsets[cur_dyn_offset],
7515                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7516                                        0) {
7517                                        skipCall |= log_msg(
7518                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7519                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7520                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7521                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7522                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7523                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7524                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7525                                    }
7526                                    cur_dyn_offset++;
7527                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7528                                    if (vk_safe_modulo(
7529                                            pDynamicOffsets[cur_dyn_offset],
7530                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7531                                        0) {
7532                                        skipCall |= log_msg(
7533                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7534                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7535                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7536                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7537                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7538                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7539                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7540                                    }
7541                                    cur_dyn_offset++;
7542                                }
7543                            }
7544                            // Keep running total of dynamic descriptor count to verify at the end
7545                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7546                        }
7547                    }
7548                } else {
7549                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7550                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7551                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7552                                        (uint64_t)pDescriptorSets[i]);
7553                }
7554            }
7555            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7556            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7557            if (firstSet > 0) { // Check set #s below the first bound set
7558                for (uint32_t i = 0; i < firstSet; ++i) {
7559                    if (pCB->boundDescriptorSets[i] &&
7560                        !verify_set_layout_compatibility(dev_data, dev_data->setMap[pCB->boundDescriptorSets[i]], layout, i,
7561                                                         errorString)) {
7562                        skipCall |= log_msg(
7563                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7564                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pCB->boundDescriptorSets[i], __LINE__,
7565                            DRAWSTATE_NONE, "DS",
7566                            "DescriptorSetDS %#" PRIxLEAST64
7567                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7568                            (uint64_t)pCB->boundDescriptorSets[i], i, (uint64_t)layout);
7569                        pCB->boundDescriptorSets[i] = VK_NULL_HANDLE;
7570                    }
7571                }
7572            }
7573            // Check if newly last bound set invalidates any remaining bound sets
7574            if ((pCB->boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7575                if (oldFinalBoundSet &&
7576                    !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7577                                                     errorString)) {
7578                    skipCall |=
7579                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7580                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7581                                DRAWSTATE_NONE, "DS", "DescriptorSetDS %#" PRIxLEAST64
7582                                                      " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7583                                                      " newly bound as set #%u so set #%u and any subsequent sets were "
7584                                                      "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7585                                (uint64_t)oldFinalBoundSet, lastSetIndex, (uint64_t)pCB->boundDescriptorSets[lastSetIndex],
7586                                lastSetIndex, lastSetIndex + 1, (uint64_t)layout);
7587                    pCB->boundDescriptorSets.resize(lastSetIndex + 1);
7588                }
7589            }
7590            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7591            if (totalDynamicDescriptors != dynamicOffsetCount) {
7592                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7593                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7594                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7595                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7596                                    "is %u. It should exactly match the number of dynamic descriptors.",
7597                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7598            }
7599            // Save dynamicOffsets bound to this CB
7600            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7601                pCB->dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7602            }
7603        } else {
7604            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7605        }
7606    }
7607    loader_platform_thread_unlock_mutex(&globalLock);
7608    if (VK_FALSE == skipCall)
7609        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7610                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7611}
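
// Illustrative sketch (not compiled): the checks above require exactly one
// pDynamicOffsets entry per dynamic descriptor in the bound sets, each a
// multiple of the matching min*BufferOffsetAlignment limit. Assume one set
// holding a single UNIFORM_BUFFER_DYNAMIC descriptor; cb, pipelineLayout, and
// descriptorSet are hypothetical placeholders.
#if 0
    uint32_t dynamicOffset = 256; // assume a multiple of minUniformBufferOffsetAlignment
    vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
                            0 /*firstSet*/, 1 /*setCount*/, &descriptorSet,
                            1 /*dynamicOffsetCount*/, &dynamicOffset);
#endif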
7612
7613VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7614vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7615    VkBool32 skipCall = VK_FALSE;
7616    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7617    loader_platform_thread_lock_mutex(&globalLock);
7618#if MTMERGE
7619    VkDeviceMemory mem;
7620    skipCall =
7621        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7622    auto cb_data = dev_data->cbMap.find(commandBuffer);
7623    if (cb_data != dev_data->cbMap.end()) {
7624        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7625        cb_data->second.validate_functions.push_back(function);
7626    }
7627    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7628#endif
7629    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7630    if (pCB) {
7631        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7632        VkDeviceSize offset_align = 0;
7633        switch (indexType) {
7634        case VK_INDEX_TYPE_UINT16:
7635            offset_align = 2;
7636            break;
7637        case VK_INDEX_TYPE_UINT32:
7638            offset_align = 4;
7639            break;
7640        default:
7641            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7642            break;
7643        }
7644        if (!offset_align || (offset % offset_align)) {
7645            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7646                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7647                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7648                                offset, string_VkIndexType(indexType));
7649        }
7650        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7651    }
7652    loader_platform_thread_unlock_mutex(&globalLock);
7653    if (VK_FALSE == skipCall)
7654        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7655}
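
// Illustrative sketch (not compiled): the alignment check above requires the
// byte offset to be a multiple of the index size (2 for UINT16, 4 for UINT32).
// cb and indexBuffer are hypothetical placeholders.
#if 0
    vkCmdBindIndexBuffer(cb, indexBuffer, 6, VK_INDEX_TYPE_UINT16); // OK: 6 % 2 == 0
    vkCmdBindIndexBuffer(cb, indexBuffer, 6, VK_INDEX_TYPE_UINT32); // flagged: 6 % 4 != 0
#endif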
7656
7657void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7658    uint32_t end = firstBinding + bindingCount;
7659    if (pCB->currentDrawData.buffers.size() < end) {
7660        pCB->currentDrawData.buffers.resize(end);
7661    }
7662    for (uint32_t i = 0; i < bindingCount; ++i) {
7663        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7664    }
7665}
7666
7667void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7668
7669VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7670                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7671                                                                  const VkDeviceSize *pOffsets) {
7672    VkBool32 skipCall = VK_FALSE;
7673    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7674    loader_platform_thread_lock_mutex(&globalLock);
7675#if MTMERGE
7676    for (uint32_t i = 0; i < bindingCount; ++i) {
7677        VkDeviceMemory mem;
7678        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7679                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7680        auto cb_data = dev_data->cbMap.find(commandBuffer);
7681        if (cb_data != dev_data->cbMap.end()) {
7682            std::function<VkBool32()> function =
7683                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7684            cb_data->second.validate_functions.push_back(function);
7685        }
7686    }
7687    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7688#endif
7689    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7690    if (pCB) {
7691        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7692        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7693    } else {
7694        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7695    }
7696    loader_platform_thread_unlock_mutex(&globalLock);
7697    if (VK_FALSE == skipCall)
7698        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7699}
7700
7701#if MTMERGE
7702/* expects globalLock to be held by caller */
7703bool markStoreImagesAndBuffersAsWritten(VkCommandBuffer commandBuffer) {
7704    bool skip_call = false;
7705    layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7706    auto cb_data = my_data->cbMap.find(commandBuffer);
7707    if (cb_data == my_data->cbMap.end())
7708        return skip_call;
7709    std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second.activeDescriptorSets;
7710    for (auto descriptorSet : activeDescriptorSets) {
7711        auto ds_data = my_data->descriptorSetMap.find(descriptorSet);
7712        if (ds_data == my_data->descriptorSetMap.end())
7713            continue;
7714        std::vector<VkImageView> images = ds_data->second.images;
7715        std::vector<VkBuffer> buffers = ds_data->second.buffers;
7716        for (auto imageView : images) {
7717            auto iv_data = my_data->imageViewMap.find(imageView);
7718            if (iv_data == my_data->imageViewMap.end())
7719                continue;
7720            VkImage image = iv_data->second.image;
7721            VkDeviceMemory mem;
7722            skip_call |=
7723                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7724            std::function<VkBool32()> function = [=]() {
7725                set_memory_valid(my_data, mem, true, image);
7726                return VK_FALSE;
7727            };
7728            cb_data->second.validate_functions.push_back(function);
7729        }
7730        for (auto buffer : buffers) {
7731            VkDeviceMemory mem;
7732            skip_call |=
7733                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7734            std::function<VkBool32()> function = [=]() {
7735                set_memory_valid(my_data, mem, true);
7736                return VK_FALSE;
7737            };
7738            cb_data->second.validate_functions.push_back(function);
7739        }
7740    }
7741    return skip_call;
7742}
7743#endif
7744
7745VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7746                                                     uint32_t firstVertex, uint32_t firstInstance) {
7747    VkBool32 skipCall = VK_FALSE;
7748    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7749    loader_platform_thread_lock_mutex(&globalLock);
7750#if MTMERGE
7751    // MTMTODO : merge with code below
7752    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7753#endif
7754    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7755    if (pCB) {
7756        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7757        pCB->drawCount[DRAW]++;
7758        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7759        // TODO : Need to pass commandBuffer as srcObj here
7760        skipCall |=
7761            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7762                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7763        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7764        if (VK_FALSE == skipCall) {
7765            updateResourceTrackingOnDraw(pCB);
7766        }
7767        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7768    }
7769    loader_platform_thread_unlock_mutex(&globalLock);
7770    if (VK_FALSE == skipCall)
7771        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7772}
7773
7774VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7775                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7776                                                            uint32_t firstInstance) {
7777    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7778    VkBool32 skipCall = VK_FALSE;
7779    loader_platform_thread_lock_mutex(&globalLock);
7780#if MTMERGE
7781    // MTMTODO : merge with code below
7782    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7783#endif
7784    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7785    if (pCB) {
7786        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7787        pCB->drawCount[DRAW_INDEXED]++;
7788        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7789        // TODO : Need to pass commandBuffer as srcObj here
7790        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7791                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7792                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7793        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7794        if (VK_FALSE == skipCall) {
7795            updateResourceTrackingOnDraw(pCB);
7796        }
7797        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7798    }
7799    loader_platform_thread_unlock_mutex(&globalLock);
7800    if (VK_FALSE == skipCall)
7801        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7802                                                        firstInstance);
7803}
7804
7805VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7806vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7807    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7808    VkBool32 skipCall = VK_FALSE;
7809    loader_platform_thread_lock_mutex(&globalLock);
7810#if MTMERGE
7811    VkDeviceMemory mem;
7812    // MTMTODO : merge with code below
7813    skipCall =
7814        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7815    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7816    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7817#endif
7818    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7819    if (pCB) {
7820        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7821        pCB->drawCount[DRAW_INDIRECT]++;
7822        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7823        // TODO : Need to pass commandBuffer as srcObj here
7824        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7825                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7826                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7827        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7828        if (VK_FALSE == skipCall) {
7829            updateResourceTrackingOnDraw(pCB);
7830        }
7831        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7832    }
7833    loader_platform_thread_unlock_mutex(&globalLock);
7834    if (VK_FALSE == skipCall)
7835        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7836}
7837
7838VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7839vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7840    VkBool32 skipCall = VK_FALSE;
7841    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7842    loader_platform_thread_lock_mutex(&globalLock);
7843#if MTMERGE
7844    VkDeviceMemory mem;
7845    // MTMTODO : merge with code below
7846    skipCall =
7847        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7848    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7849    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7850#endif
7851    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7852    if (pCB) {
7853        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7854        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7855        loader_platform_thread_unlock_mutex(&globalLock);
7856        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7857        loader_platform_thread_lock_mutex(&globalLock);
7858        // TODO : Need to pass commandBuffer as srcObj here
7859        skipCall |=
7860            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7861                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7862                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7863        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7864        if (VK_FALSE == skipCall) {
7865            updateResourceTrackingOnDraw(pCB);
7866        }
7867        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7868    }
7869    loader_platform_thread_unlock_mutex(&globalLock);
7870    if (VK_FALSE == skipCall)
7871        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7872}
7873
7874VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7875    VkBool32 skipCall = VK_FALSE;
7876    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7877    loader_platform_thread_lock_mutex(&globalLock);
7878#if MTMERGE
7879    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7880#endif
7881    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7882    if (pCB) {
7883        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7884        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7885    }
7886    loader_platform_thread_unlock_mutex(&globalLock);
7887    if (VK_FALSE == skipCall)
7888        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7889}
7890
7891VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7892vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7893    VkBool32 skipCall = VK_FALSE;
7894    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7895    loader_platform_thread_lock_mutex(&globalLock);
7896#if MTMERGE
7897    VkDeviceMemory mem;
7898    skipCall =
7899        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7900    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7901    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7902#endif
7903    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7904    if (pCB) {
7905        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7906        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7907    }
7908    loader_platform_thread_unlock_mutex(&globalLock);
7909    if (VK_FALSE == skipCall)
7910        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7911}
7912
7913VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7914                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7915    VkBool32 skipCall = VK_FALSE;
7916    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7917    loader_platform_thread_lock_mutex(&globalLock);
7918#if MTMERGE
7919    VkDeviceMemory mem;
7920    auto cb_data = dev_data->cbMap.find(commandBuffer);
7922    skipCall =
7923        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7924    if (cb_data != dev_data->cbMap.end()) {
7925        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7926        cb_data->second.validate_functions.push_back(function);
7927    }
7928    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7929    skipCall |=
7930        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7931    if (cb_data != dev_data->cbMap.end()) {
7932        std::function<VkBool32()> function = [=]() {
7933            set_memory_valid(dev_data, mem, true);
7934            return VK_FALSE;
7935        };
7936        cb_data->second.validate_functions.push_back(function);
7937    }
7938    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7939    // Validate that SRC & DST buffers have correct usage flags set
7940    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7941                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7942    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7943                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7944#endif
7945    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7946    if (pCB) {
7947        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7948        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7949    }
7950    loader_platform_thread_unlock_mutex(&globalLock);
7951    if (VK_FALSE == skipCall)
7952        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7953}
7954
7955VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7956                                 VkImageLayout srcImageLayout) {
7957    VkBool32 skip_call = VK_FALSE;
7958
7959    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7960    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7961    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7962        uint32_t layer = i + subLayers.baseArrayLayer;
7963        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7964        IMAGE_CMD_BUF_LAYOUT_NODE node;
7965        if (!FindLayout(pCB, srcImage, sub, node)) {
7966            SetLayout(pCB, srcImage, sub, {srcImageLayout, srcImageLayout});
7967            continue;
7968        }
7969        if (node.layout != srcImageLayout) {
7970            // TODO: Improve log message in the next pass
7971            skip_call |=
7972                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7973                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7974                                                                        "when its current layout is %s.",
7975                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7976        }
7977    }
7978    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7979        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7980            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7981            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7982                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7983                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7984        } else {
7985            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7986                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7987                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7988                                 string_VkImageLayout(srcImageLayout));
7989        }
7990    }
7991    return skip_call;
7992}
7993
7994VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7995                               VkImageLayout destImageLayout) {
7996    VkBool32 skip_call = VK_FALSE;
7997
7998    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7999    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8000    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
8001        uint32_t layer = i + subLayers.baseArrayLayer;
8002        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
8003        IMAGE_CMD_BUF_LAYOUT_NODE node;
8004        if (!FindLayout(pCB, destImage, sub, node)) {
8005            SetLayout(pCB, destImage, sub, {destImageLayout, destImageLayout});
8006            continue;
8007        }
8008        if (node.layout != destImageLayout) {
8009            skip_call |=
8010                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
8011                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s "
8012                                                                        "when its current layout is %s.",
8013                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
8014        }
8015    }
8016    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
8017        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
8018            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
8019            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8020                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8021                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
8022        } else {
8023            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8024                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
8025                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
8026                                 string_VkImageLayout(destImageLayout));
8027        }
8028    }
8029    return skip_call;
8030}
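
// Illustrative sketch (not compiled): to satisfy the layout checks above, an
// application transitions each image with a barrier before the copy. Shown for
// the destination image; the source side is symmetric with
// TRANSFER_SRC_OPTIMAL. cb and dstImage are hypothetical placeholders.
#if 0
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = dstImage;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
                         0, NULL, 0, NULL, 1, &barrier);
#endif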

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    // Validate that src & dst images have correct usage flags set
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}
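
// Illustrative app-side sketch (hypothetical names; a minimal path that satisfies the checks
// above): transition the source image to TRANSFER_SRC_OPTIMAL before the copy so neither the
// layout error nor the GENERAL-layout performance warning fires.
//
//     VkImageMemoryBarrier toSrc = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     toSrc.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
//     toSrc.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     toSrc.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     toSrc.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     toSrc.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     toSrc.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     toSrc.image = srcImg;
//     toSrc.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &toSrc);
//     vkCmdCopyImage(cb, srcImg, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                    dstImg, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copyRegion);
//     // (dstImg is assumed to already be in TRANSFER_DST_OPTIMAL via a similar barrier.)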

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    // Validate that src & dst images have correct usage flags set
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Validate that src buff & dst image have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}
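
// Illustrative sketch of the usage-flag requirements above (hypothetical names): the staging
// buffer must be created with TRANSFER_SRC usage and the destination image with TRANSFER_DST,
// or validate_buffer_usage_flags()/validate_image_usage_flags() report errors.
//
//     VkBufferCreateInfo bci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
//     bci.size = uploadSize;
//     bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; // required for the srcBuffer of the copy
//     bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     vkCreateBuffer(device, &bci, nullptr, &stagingBuf);
//     // ...and VkImageCreateInfo::usage for dstImage must include VK_IMAGE_USAGE_TRANSFER_DST_BIT.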

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Validate that dst buff & src image have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}
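
// Illustrative ordering sketch (hypothetical names): the insideRenderPass() checks above flag
// transfer-style commands recorded inside a render pass, so updates and fills belong before
// vkCmdBeginRenderPass() (or after vkCmdEndRenderPass()).
//
//     uint32_t words[4] = {0, 1, 2, 3};
//     vkCmdUpdateBuffer(cb, uniformBuf, 0, sizeof(words), words); // OK: outside render pass
//     vkCmdBeginRenderPass(cb, &rpBegin, VK_SUBPASS_CONTENTS_INLINE);
//     // vkCmdFillBuffer(cb, buf, 0, VK_WHOLE_SIZE, 0);          // would be flagged here
//     vkCmdEndRenderPass(cb);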

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
                                                                 const VkClearRect *pRects) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
            // call CmdClearAttachments
            // Otherwise this seems more like a performance warning.
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                (uint64_t)(commandBuffer));
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                VkBool32 found = VK_FALSE;
                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
                        found = VK_TRUE;
                        break;
                    }
                }
                if (VK_FALSE == found) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
                        attachment->colorAttachment, pCB->activeSubpass);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass

                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
                        "in active subpass %d",
                        attachment->colorAttachment,
                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
                        pCB->activeSubpass);
                }
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}
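
// Illustrative sketch of the recommendation above (hypothetical names): a full-extent clear
// before any draw is usually better expressed as a render pass load op, which sidesteps the
// DRAWSTATE_CLEAR_CMD_BEFORE_DRAW performance warning.
//
//     VkAttachmentDescription color = {};
//     color.format = swapchainFormat;
//     color.samples = VK_SAMPLE_COUNT_1_BIT;
//     color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;   // clear happens when the attachment is loaded
//     color.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     color.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     color.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     // The clear color is then supplied in VkRenderPassBeginInfo::pClearValues rather than
//     // recording vkCmdClearAttachments() as the first command of the pass.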

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                            const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGE
    auto cb_data = dev_data->cbMap.find(commandBuffer);
    VkDeviceMemory mem;
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->cbMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second.validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        pCB->events.push_back(event);
        pCB->eventToStageMap[event] = stageMask;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        pCB->events.push_back(event);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}
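
// Illustrative app-side sketch (hypothetical names): the stageMask recorded per event here is
// what the srcStageMask consistency check in vkCmdWaitEvents() further below compares against.
//
//     vkCmdSetEvent(cb, evt, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     // ...sourceStageMask must equal the OR of the masks used when the events were set:
//     vkCmdWaitEvents(cb, 1, &evt, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                     0, nullptr, 0, nullptr, 0, nullptr);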

VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    VkBool32 skip = VK_FALSE;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where
        // possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub, {mem_barrier->oldLayout, mem_barrier->newLayout});
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}
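
// Illustrative sketch (hypothetical names): TransitionImageLayouts() accepts a barrier whose
// oldLayout matches the layout last tracked for the subresource in this command buffer;
// UNDEFINED is always accepted since the image contents may be discarded.
//
//     barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; // must match the tracked layout
//     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;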

// Print readable FlagBits in FlagMask
std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1 << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}
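
// Example output (illustrative): for accessMask == (VK_ACCESS_TRANSFER_READ_BIT |
// VK_ACCESS_TRANSFER_WRITE_BIT) this returns
// "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]"; a zero mask yields "[None]".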

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
    VkBool32 skip_call = VK_FALSE;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        // Warn on any bits set outside of the allowed set (bitwise NOT, not logical NOT)
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                     const VkImageLayout &layout, const char *type) {
    VkBool32 skip_call = VK_FALSE;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}
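
// Illustrative pairing sketch (hypothetical names) for the layout/access-mask table above:
//
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;     // required bit for this layout
//     // For SHADER_READ_ONLY_OPTIMAL, at least one of VK_ACCESS_INPUT_ATTACHMENT_READ_BIT or
//     // VK_ACCESS_SHADER_READ_BIT must be present instead.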

VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                          const VkImageMemoryBarrier *pImageMemBarriers) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = dev_data->imageMap.find(mem_barrier->image);
        if (image_data != dev_data->imageMap.end()) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than the number of "
                                         "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                             "PREINITIALIZED.",
                            funcName);
            }
            auto image_data = dev_data->imageMap.find(mem_barrier->image);
            VkFormat format;
            uint32_t arrayLayers, mipLevels;
            bool imageFound = false;
            if (image_data != dev_data->imageMap.end()) {
                format = image_data->second.createInfo.format;
                arrayLayers = image_data->second.createInfo.arrayLayers;
                mipLevels = image_data->second.createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
                        format = swapchain_data->second->createInfo.imageFormat;
                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                if (vk_format_is_depth_and_stencil(format) &&
                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
                                                                 "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
                                                                 "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                funcName);
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource: the sum of baseArrayLayer (%d) and "
                                                                 "layerCount (%d) must be less than or equal to the "
                                                                 "total number of layers (%d).",
                                funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
                                arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource: the sum of baseMipLevel (%d) and "
                                                                 "levelCount (%d) must be less than or equal to the "
                                                                 "total number of levels (%d).",
                                funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
                                mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->physDevProperties.queue_family_properties.size());
        }

        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
        if (buffer_data != dev_data->bufferMap.end()) {
            // Only read the create info once the buffer is known to be tracked
            uint64_t buffer_size =
                buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
                                                             " which is not less than total size %" PRIu64 ".",
                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
                                     " whose sum is greater than total size %" PRIu64 ".",
                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
            }
        }
    }
    return skip_call;
}
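
// Illustrative sketch (hypothetical names): for a VK_SHARING_MODE_EXCLUSIVE image, a queue
// family ownership transfer names both families explicitly; otherwise both indices stay
// VK_QUEUE_FAMILY_IGNORED, matching the checks above.
//
//     imgBarrier.srcQueueFamilyIndex = graphicsFamilyIndex; // release from the graphics queue
//     imgBarrier.dstQueueFamilyIndex = transferFamilyIndex; // acquire on the transfer queue
//     // Mixing VK_QUEUE_FAMILY_IGNORED with a concrete index triggers DRAWSTATE_INVALID_QUEUE_INDEX.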

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        VkPipelineStageFlags stageMask = 0;
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.push_back(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
            auto event_data = pCB->eventToStageMap.find(pEvents[i]);
            if (event_data != pCB->eventToStageMap.end()) {
                stageMask |= event_data->second;
            } else {
                auto global_event_data = dev_data->eventMap.find(pEvents[i]);
                if (global_event_data == dev_data->eventMap.end()) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                        reinterpret_cast<const uint64_t &>(pEvents[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                        "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                        reinterpret_cast<const uint64_t &>(pEvents[i]));
                } else {
                    stageMask |= global_event_data->second.stageMask;
                }
            }
        }
        if (sourceStageMask != stageMask) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_FENCE, "DS", "srcStageMask in vkCmdWaitEvents must be the bitwise OR of the "
                                                               "stageMask parameters used in calls to vkCmdSetEvent and "
                                                               "VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.");
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}
8899
8900VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8901    VkBool32 skipCall = VK_FALSE;
8902    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8903    loader_platform_thread_lock_mutex(&globalLock);
8904    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8905    if (pCB) {
8906        QueryObject query = {queryPool, slot};
8907        if (!pCB->activeQueries.count(query)) {
8908            skipCall |=
8909                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8910                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8911                        (uint64_t)(queryPool), slot);
8912        } else {
8913            pCB->activeQueries.erase(query);
8914        }
8915        pCB->queryToStateMap[query] = 1;
8916        if (pCB->state == CB_RECORDING) {
8917            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8918        } else {
8919            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8920        }
8921    }
8922    loader_platform_thread_unlock_mutex(&globalLock);
8923    if (VK_FALSE == skipCall)
8924        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8925}
8926
8927VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8928vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8929    VkBool32 skipCall = VK_FALSE;
8930    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8931    loader_platform_thread_lock_mutex(&globalLock);
8932    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8933    if (pCB) {
8934        for (uint32_t i = 0; i < queryCount; i++) {
8935            QueryObject query = {queryPool, firstQuery + i};
8936            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8937            pCB->queryToStateMap[query] = 0;
8938        }
8939        if (pCB->state == CB_RECORDING) {
8940            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8941        } else {
8942            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8943        }
8944        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8945    }
8946    loader_platform_thread_unlock_mutex(&globalLock);
8947    if (VK_FALSE == skipCall)
8948        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8949}
8950
8951VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8952vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8953                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8954    VkBool32 skipCall = VK_FALSE;
8955    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8956    loader_platform_thread_lock_mutex(&globalLock);
8957    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8958#if MTMERGE
8959    VkDeviceMemory mem = VK_NULL_HANDLE; // initialized so the lambda below never captures garbage if the binding lookup fails
8960    auto cb_data = dev_data->cbMap.find(commandBuffer);
8961    skipCall |=
8962        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8963    if (cb_data != dev_data->cbMap.end()) {
8964        std::function<VkBool32()> function = [=]() {
8965            set_memory_valid(dev_data, mem, true);
8966            return VK_FALSE;
8967        };
8968        cb_data->second.validate_functions.push_back(function);
8969    }
8970    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8971    // Validate that DST buffer has correct usage flags set
8972    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8973                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8974#endif
8975    if (pCB) {
8976        for (uint32_t i = 0; i < queryCount; i++) {
8977            QueryObject query = {queryPool, firstQuery + i};
8978            if (!pCB->queryToStateMap[query]) {
8979                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8980                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8981                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8982                                    (uint64_t)(queryPool), firstQuery + i);
8983            }
8984        }
8985        if (pCB->state == CB_RECORDING) {
8986            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8987        } else {
8988            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8989        }
8990        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8991    }
8992    loader_platform_thread_unlock_mutex(&globalLock);
8993    if (VK_FALSE == skipCall)
8994        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8995                                                                 dstOffset, stride, flags);
8996}
8997
8998VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8999                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9000                                                              const void *pValues) {
9001    bool skipCall = false;
9002    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9003    loader_platform_thread_lock_mutex(&globalLock);
9004    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9005    if (pCB) {
9006        if (pCB->state == CB_RECORDING) {
9007            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9008        } else {
9009            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9010        }
9011    }
9012    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
9013        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
9014    }
9015    // TODO : Add warning if push constant update doesn't align with range
9016    loader_platform_thread_unlock_mutex(&globalLock);
9017    if (!skipCall)
9018        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9019}
9020
9021VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9022vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9023    VkBool32 skipCall = VK_FALSE;
9024    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9025    loader_platform_thread_lock_mutex(&globalLock);
9026    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9027    if (pCB) {
9028        QueryObject query = {queryPool, slot};
9029        pCB->queryToStateMap[query] = 1;
9030        if (pCB->state == CB_RECORDING) {
9031            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9032        } else {
9033            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9034        }
9035    }
9036    loader_platform_thread_unlock_mutex(&globalLock);
9037    if (VK_FALSE == skipCall)
9038        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9039}
9040
9041VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9042                                                                   const VkAllocationCallbacks *pAllocator,
9043                                                                   VkFramebuffer *pFramebuffer) {
9044    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9045    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9046    if (VK_SUCCESS == result) {
9047        // Shadow create info and store in map
9048        VkFramebufferCreateInfo *localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
9049        if (pCreateInfo->pAttachments) {
9050            localFBCI->pAttachments = new VkImageView[localFBCI->attachmentCount];
9051            memcpy((void *)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount * sizeof(VkImageView));
9052        }
9053        FRAMEBUFFER_NODE fbNode = {};
9054        fbNode.createInfo = *localFBCI;
9055        std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer, fbNode);
9056        loader_platform_thread_lock_mutex(&globalLock);
9057#if MTMERGE
9058        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9059            VkImageView view = pCreateInfo->pAttachments[i];
9060            auto view_data = dev_data->imageViewMap.find(view);
9061            if (view_data == dev_data->imageViewMap.end()) {
9062                continue;
9063            }
9064            MT_FB_ATTACHMENT_INFO fb_info;
9065            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9066                                        &fb_info.mem);
9067            fb_info.image = view_data->second.image;
9068            dev_data->fbMap[*pFramebuffer].attachments.push_back(fb_info);
9069        }
9070#endif
9071        dev_data->frameBufferMap.insert(fbPair);
9072        loader_platform_thread_unlock_mutex(&globalLock);
9073    }
9074    return result;
9075}
9076
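// Depth-first search helper for the subpass dependency checks: walk the 'prev' edges of the
// subpass DAG from 'index' looking for 'dependent'. 'processed_nodes' guards against revisiting
// subpasses. Returns VK_TRUE if a (possibly transitive) dependency path exists, VK_FALSE otherwise.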
9077VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9078                        std::unordered_set<uint32_t> &processed_nodes) {
9079    // If we have already checked this node, no dependency path was found through it, so return false.
9080    if (processed_nodes.count(index))
9081        return VK_FALSE;
9082    processed_nodes.insert(index);
9083    const DAGNode &node = subpass_to_node[index];
9084    // Look for a direct dependency. If one exists, return true; otherwise recurse on the previous nodes.
9085    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9086        for (auto elem : node.prev) {
9087            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9088                return VK_TRUE;
9089        }
9090    } else {
9091        return VK_TRUE;
9092    }
9093    return VK_FALSE;
9094}
9095
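// Verify that 'subpass' is ordered against every subpass in 'dependent_subpasses' that shares an
// attachment with it. A purely transitive path is reported as an implicit-only dependency; a
// missing path is reported as an error and clears the returned result to VK_FALSE.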
9096VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9097                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
9098    VkBool32 result = VK_TRUE;
9099    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9100    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9101        if (subpass == dependent_subpasses[k])
9102            continue;
9103        const DAGNode &node = subpass_to_node[subpass];
9104        // Check for a specified dependency between the two nodes. If one exists we are done.
9105        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9106        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9107        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9108            // If no explicit dependency exists, an implicit one still might. If so, warn; if not, flag an error.
9109            std::unordered_set<uint32_t> processed_nodes;
9110            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9111                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
9112                // TODO: Verify against Valid Use section of spec
9113                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9114                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9115                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
9116                                     subpass, dependent_subpasses[k]);
9117            } else {
9118                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9119                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9120                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9121                                     dependent_subpasses[k]);
9122                result = VK_FALSE;
9123            }
9124        }
9125    }
9126    return result;
9127}
9128
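// Returns VK_TRUE if subpass 'index' or one of its predecessors writes 'attachment'. For
// intermediate subpasses (depth > 0) that sit between the writer and the reader, the attachment
// must also appear in their pPreserveAttachments list, otherwise an error is logged.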
9129VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9130                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
9131    const DAGNode &node = subpass_to_node[index];
9132    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9133    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9134    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9135        if (attachment == subpass.pColorAttachments[j].attachment)
9136            return VK_TRUE;
9137    }
9138    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9139        if (attachment == subpass.pDepthStencilAttachment->attachment)
9140            return VK_TRUE;
9141    }
9142    VkBool32 result = VK_FALSE;
9143    // Loop through previous nodes and see if any of them write to the attachment.
9144    for (auto elem : node.prev) {
9145        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9146    }
9147    // If the attachment was written to by a previous node, then this node needs to preserve it.
9148    if (result && depth > 0) {
9150        VkBool32 has_preserved = VK_FALSE;
9151        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9152            if (subpass.pPreserveAttachments[j] == attachment) {
9153                has_preserved = VK_TRUE;
9154                break;
9155            }
9156        }
9157        if (has_preserved == VK_FALSE) {
9158            skip_call |=
9159                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9160                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9161                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9162        }
9163    }
9164    return result;
9165}
9166
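// Generic half-open range overlap test shared by the subresource and memory-range checks below.
// For example, isRangeOverlapping(0u, 4u, 2u, 4u) is true ([0,4) and [2,6) intersect), while
// isRangeOverlapping(0u, 4u, 4u, 4u) is false (the ranges only touch at 4).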
9167template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9168    // Two half-open ranges [offset, offset + size) overlap iff each one starts before the other ends.
9169    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
9170}
9171
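// Two image subresource regions overlap only if both their mip-level and array-layer ranges overlap.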
9172bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9173    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9174            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9175}
9176
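// Validate the dependency declarations of the render pass being begun. First determine which
// attachments alias one another (same image view, overlapping subresources of one image, or
// overlapping memory bindings) and require VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT on aliased
// pairs. Then verify that every pair of subpasses sharing an attachment is ordered by an explicit
// dependency, and that attachments read after being written are preserved in between. Note that
// attachment references are assumed not to be VK_ATTACHMENT_UNUSED here; an unused reference
// would index past the end of the per-attachment vectors.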
9177VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
9178                              const std::vector<DAGNode> &subpass_to_node) {
9179    VkBool32 skip_call = VK_FALSE;
9180    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9181    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
9182    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9183    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9184    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9185    // Find overlapping attachments
9186    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9187        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9188            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9189            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9190            if (viewi == viewj) {
9191                overlapping_attachments[i].push_back(j);
9192                overlapping_attachments[j].push_back(i);
9193                continue;
9194            }
9195            auto view_data_i = my_data->imageViewMap.find(viewi);
9196            auto view_data_j = my_data->imageViewMap.find(viewj);
9197            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
9198                continue;
9199            }
9200            if (view_data_i->second.image == view_data_j->second.image &&
9201                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
9202                overlapping_attachments[i].push_back(j);
9203                overlapping_attachments[j].push_back(i);
9204                continue;
9205            }
9206            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
9207            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
9208            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
9209                continue;
9210            }
9211            if (image_data_i->second.mem == image_data_j->second.mem &&
9212                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
9213                                   image_data_j->second.memSize)) {
9214                overlapping_attachments[i].push_back(j);
9215                overlapping_attachments[j].push_back(i);
9216            }
9217        }
9218    }
9219    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9220        uint32_t attachment = i;
9221        for (auto other_attachment : overlapping_attachments[i]) {
9222            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9223                skip_call |=
9224                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9225                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9226                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9227                            attachment, other_attachment);
9228            }
9229            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9230                skip_call |=
9231                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9232                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9233                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9234                            other_attachment, attachment);
9235            }
9236        }
9237    }
9238    // For each attachment, find the subpasses that use it.
9239    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9240        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9241        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9242            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9243            input_attachment_to_subpass[attachment].push_back(i);
9244            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9245                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9246            }
9247        }
9248        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9249            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9250            output_attachment_to_subpass[attachment].push_back(i);
9251            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9252                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9253            }
9254        }
9255        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9256            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9257            output_attachment_to_subpass[attachment].push_back(i);
9258            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9259                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9260            }
9261        }
9262    }
9263    // Where a dependency is needed, make sure one exists.
9264    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9265        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9266        // If the attachment is an input, then all subpasses that write to it must have a dependency relationship
9267        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9268            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
9269            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9270        }
9271        // If the attachment is an output, then all subpasses that use the attachment must have a dependency relationship
9272        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9273            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
9274            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9275            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9276        }
9277        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9278            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9279            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9280            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9281        }
9282    }
9283    // Loop through the implicit dependencies: if a subpass reads an attachment, make sure the attachment is
9284    // preserved by every subpass between the one that wrote it and the one that reads it.
9285    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9286        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9287        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9288            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9289        }
9290    }
9291    return skip_call;
9292}
9293
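// Check the attachment layouts declared by each subpass: input attachments belong in a read-only
// optimal layout (depth/stencil or shader), color attachments in COLOR_ATTACHMENT_OPTIMAL, and
// depth/stencil attachments in DEPTH_STENCIL_ATTACHMENT_OPTIMAL. GENERAL is accepted with a
// performance warning; any other layout is an error.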
9294VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9295    VkBool32 skip = VK_FALSE;
9296
9297    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9298        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9299        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9300            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
9301                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9302                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9303                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9304                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9305                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9306                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9307                } else {
9308                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9309                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9310                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9311                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
9312                }
9313            }
9314        }
9315        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9316            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
9317                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9318                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9319                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9320                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9321                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9322                } else {
9323                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9324                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9325                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9326                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
9327                }
9328            }
9329        }
9330        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9331            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
9332                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
9333                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9334                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9335                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9336                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
9337                } else {
9338                    skip |=
9339                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9340                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9341                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
9342                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9343                }
9344            }
9345        }
9346    }
9347    return skip;
9348}
9349
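// Build the subpass DAG from the declared dependencies: each DAGNode records its previous and
// next subpasses, and 'has_self_dependency' marks subpasses that depend on themselves. Rejects
// dependencies that point from a later subpass to an earlier one, and dependencies where both
// ends are VK_SUBPASS_EXTERNAL.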
9350VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9351                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9352    VkBool32 skip_call = VK_FALSE;
9353    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9354        DAGNode &subpass_node = subpass_to_node[i];
9355        subpass_node.pass = i;
9356    }
9357    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9358        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9359        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9360            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9361            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9362                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9363                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9364        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9365            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9366                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9367        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9368            has_self_dependency[dependency.srcSubpass] = true;
9369        }
9370        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9371            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9372        }
9373        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9374            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9375        }
9376    }
9377    return skip_call;
9378}
9379// TODOSC : Add intercept of vkCreateShaderModule
9380
9381VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9382                                                                    const VkAllocationCallbacks *pAllocator,
9383                                                                    VkShaderModule *pShaderModule) {
9384    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9385    VkBool32 skip_call = VK_FALSE;
9386    if (!shader_is_spirv(pCreateInfo)) {
9387        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9388                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
9389    }
9390
9391    if (VK_FALSE != skip_call)
9392        return VK_ERROR_VALIDATION_FAILED_EXT;
9393
9394    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9395
9396    if (res == VK_SUCCESS) {
9397        loader_platform_thread_lock_mutex(&globalLock);
9398        my_data->shaderModuleMap[*pShaderModule] = new shader_module(pCreateInfo);
9399        loader_platform_thread_unlock_mutex(&globalLock);
9400    }
9401    return res;
9402}
9403
9404VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9405                                                                  const VkAllocationCallbacks *pAllocator,
9406                                                                  VkRenderPass *pRenderPass) {
9407    VkBool32 skip_call = VK_FALSE;
9408    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9409    loader_platform_thread_lock_mutex(&globalLock);
9410    // Create DAG
9411    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9412    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9413    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9414    // Validate
9415    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9416    loader_platform_thread_unlock_mutex(&globalLock); // unlock before any return so the early-out below cannot leak the lock
9417    if (VK_FALSE != skip_call) {
9418        return VK_ERROR_VALIDATION_FAILED_EXT;
9419    }
9420    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9421    if (VK_SUCCESS == result) {
9422        loader_platform_thread_lock_mutex(&globalLock);
9423#if MTMERGE
9424        // MTMTODO : Merge with code from below to eliminate duplication
9425        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9426            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9427            MT_PASS_ATTACHMENT_INFO pass_info;
9428            pass_info.load_op = desc.loadOp;
9429            pass_info.store_op = desc.storeOp;
9430            pass_info.attachment = i;
9431            dev_data->passMap[*pRenderPass].attachments.push_back(pass_info);
9432        }
9433        // TODO: Maybe fill list and then copy instead of locking
9434        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->passMap[*pRenderPass].attachment_first_read;
9435        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = dev_data->passMap[*pRenderPass].attachment_first_layout;
9436        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9437            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9438            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9439                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9440                if (attachment_first_read.count(attachment))
9441                    continue;
9442                attachment_first_read.insert(std::make_pair(attachment, true));
9443                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9444            }
9445            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9446                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9447                if (attachment_first_read.count(attachment))
9448                    continue;
9449                attachment_first_read.insert(std::make_pair(attachment, false));
9450                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9451            }
9452            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9453                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9454                if (attachment_first_read.count(attachment))
9455                    continue;
9456                attachment_first_read.insert(std::make_pair(attachment, false));
9457                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9458            }
9459        }
9460#endif
9461        // TODOSC : Merge in tracking of renderpass from shader_checker
9462        // Shadow create info and store in map
9463        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9464        if (pCreateInfo->pAttachments) {
9465            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9466            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9467                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9468        }
9469        if (pCreateInfo->pSubpasses) {
9470            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9471            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9472
9473            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9474                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9475                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9476                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9477                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9478                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9479
9480                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9481                subpass->pInputAttachments = attachments;
9482                attachments += subpass->inputAttachmentCount;
9483
9484                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9485                subpass->pColorAttachments = attachments;
9486                attachments += subpass->colorAttachmentCount;
9487
9488                if (subpass->pResolveAttachments) {
9489                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9490                    subpass->pResolveAttachments = attachments;
9491                    attachments += subpass->colorAttachmentCount;
9492                }
9493
9494                if (subpass->pDepthStencilAttachment) {
9495                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9496                    subpass->pDepthStencilAttachment = attachments;
9497                    attachments += 1;
9498                }
9499
9500                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
9501                subpass->pPreserveAttachments = &attachments->attachment; // preserve indices are raw uint32_t values packed into this block
9502            }
9503        }
9504        if (pCreateInfo->pDependencies) {
9505            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9506            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9507                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9508        }
9509        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9510        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9511        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9512        loader_platform_thread_unlock_mutex(&globalLock);
9513    }
9514    return result;
9515}
9516// Free the renderpass shadow
9517static void deleteRenderPasses(layer_data *my_data) {
9518    if (my_data->renderPassMap.empty())
9519        return;
9520    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9521        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9522        delete[] pRenderPassInfo->pAttachments;
9523        if (pRenderPassInfo->pSubpasses) {
9524            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9525                // Attachments are all allocated in one block, so we just need to
9526                //  find the first non-null pointer to delete
9527                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9528                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9529                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9530                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9531                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9532                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9533                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9534                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9535                }
9536            }
9537            delete[] pRenderPassInfo->pSubpasses;
9538        }
9539        delete[] pRenderPassInfo->pDependencies;
9540        delete pRenderPassInfo;
9541        delete (*ii).second;
9542    }
9543    my_data->renderPassMap.clear();
9544}
9545
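// At vkCmdBeginRenderPass() time, verify that the framebuffer has the same attachment count as
// the render pass, and that each attachment subresource is already in the initialLayout the
// render pass expects (subresources with no tracked layout are simply initialized to it).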
9546VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9547    VkBool32 skip_call = VK_FALSE;
9548    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9549    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9550    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9551    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9552    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9553        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9554                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9555                                                                 "with a different number of attachments.");
9556    }
9557    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9558        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9559        auto image_data = dev_data->imageViewMap.find(image_view);
9560        assert(image_data != dev_data->imageViewMap.end());
9561        const VkImage &image = image_data->second.image;
9562        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9563        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9564                                             pRenderPassInfo->pAttachments[i].initialLayout};
9565        // TODO: Do not iterate over every possibility - consolidate where possible
9566        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9567            uint32_t level = subRange.baseMipLevel + j;
9568            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9569                uint32_t layer = subRange.baseArrayLayer + k;
9570                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9571                IMAGE_CMD_BUF_LAYOUT_NODE node;
9572                if (!FindLayout(pCB, image, sub, node)) {
9573                    SetLayout(pCB, image, sub, newNode);
9574                    continue;
9575                }
9576                if (newNode.layout != node.layout) {
9577                    skip_call |=
9578                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9579                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9580                                                                    "where the initial layout differs from the starting layout.",
9582                                i);
9583                }
9584            }
9585        }
9586    }
9587    return skip_call;
9588}
9589
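// Record the layout transitions performed by subpass 'subpass_index' on its input, color, and
// depth/stencil attachments in the command buffer's layout tracking state.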
9590void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
9591    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9592    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9593    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9594    if (render_pass_data == dev_data->renderPassMap.end()) {
9595        return;
9596    }
9597    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9598    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9599    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9600        return;
9601    }
9602    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9603    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9604    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9605        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9606        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9607    }
9608    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9609        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9610        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9611    }
9612    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9613        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9614        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9615    }
9616}
9617
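// Flag an error if 'cmd_name' is being recorded into a secondary command buffer; the caller only
// invokes this for commands that are valid solely in primary command buffers.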
9618VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9619    VkBool32 skip_call = VK_FALSE;
9620    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9621        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9622                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9623                             cmd_name.c_str());
9624    }
9625    return skip_call;
9626}
9627
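// At vkCmdEndRenderPass() time, record each attachment's transition to the finalLayout declared
// in the render pass create info.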
9628void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9629    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9630    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9631    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9632    if (render_pass_data == dev_data->renderPassMap.end()) {
9633        return;
9634    }
9635    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9636    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9637    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9638        return;
9639    }
9640    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9641    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9642        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9643        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9644    }
9645}
9646
9647VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9648vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9649    VkBool32 skipCall = VK_FALSE;
9650    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9651    loader_platform_thread_lock_mutex(&globalLock);
9652    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9653    if (pCB) {
9654        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
9655#if MTMERGE
9656            auto pass_data = dev_data->passMap.find(pRenderPassBegin->renderPass);
9657            if (pass_data != dev_data->passMap.end()) {
9658                MT_PASS_INFO &pass_info = pass_data->second;
9659                pass_info.fb = pRenderPassBegin->framebuffer;
9660                auto cb_data = dev_data->cbMap.find(commandBuffer);
9661                for (size_t i = 0; i < pass_info.attachments.size(); ++i) {
9662                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->fbMap[pass_info.fb].attachments[i];
9663                    if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9664                        if (cb_data != dev_data->cbMap.end()) {
9665                            std::function<VkBool32()> function = [=]() {
9666                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9667                                return VK_FALSE;
9668                            };
9669                            cb_data->second.validate_functions.push_back(function);
9670                        }
9671                        VkImageLayout &attachment_layout = pass_info.attachment_first_layout[pass_info.attachments[i].attachment];
9672                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9673                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9674                            skipCall |=
9675                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9676                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9677                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9678                                        pass_info.attachments[i].attachment, attachment_layout);
9679                        }
9680                    } else if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9681                        if (cb_data != dev_data->cbMap.end()) {
9682                            std::function<VkBool32()> function = [=]() {
9683                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9684                                return VK_FALSE;
9685                            };
9686                            cb_data->second.validate_functions.push_back(function);
9687                        }
9688                    } else if (pass_info.attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9689                        if (cb_data != dev_data->cbMap.end()) {
9690                            std::function<VkBool32()> function = [=]() {
9691                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9692                            };
9693                            cb_data->second.validate_functions.push_back(function);
9694                        }
9695                    }
9696                    if (pass_info.attachment_first_read[pass_info.attachments[i].attachment]) {
9697                        if (cb_data != dev_data->cbMap.end()) {
9698                            std::function<VkBool32()> function = [=]() {
9699                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9700                            };
9701                            cb_data->second.validate_functions.push_back(function);
9702                        }
9703                    }
9704                }
9705                if (cb_data != dev_data->cbMap.end()) {
9706                    cb_data->second.pass = pRenderPassBegin->renderPass;
9707                }
9708            }
9709#endif
9710            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9711            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9712            if (render_pass_data != dev_data->renderPassMap.end()) {
9713                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9714            }
9715            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9716            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9717            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9718            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9719            // This is a shallow copy as that is all that is needed for now
9720            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9721            pCB->activeSubpass = 0;
9722            pCB->activeSubpassContents = contents;
9723            pCB->framebuffer = pRenderPassBegin->framebuffer;
9724            // Connect this framebuffer to this cmdBuffer
9725            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9726        } else {
9727            skipCall |=
9728                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9729                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9730        }
9731    }
9732    loader_platform_thread_unlock_mutex(&globalLock);
9733    if (VK_FALSE == skipCall) {
9734        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9735        loader_platform_thread_lock_mutex(&globalLock);
9736        // This is a shallow copy as that is all that is needed for now
9737        dev_data->renderPassBeginInfo = *pRenderPassBegin;
9738        dev_data->currentSubpass = 0;
9739        loader_platform_thread_unlock_mutex(&globalLock);
9740    }
9741}
9742
9743VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9744    VkBool32 skipCall = VK_FALSE;
9745    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9746    loader_platform_thread_lock_mutex(&globalLock);
9747    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9748    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
9749    if (pCB) {
9750        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9751        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9752        pCB->activeSubpass++;
9753        pCB->activeSubpassContents = contents;
9754        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9755        if (pCB->lastBoundPipeline) {
9756            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS, pCB->lastBoundPipeline);
9757        }
9758        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9759    }
9760    loader_platform_thread_unlock_mutex(&globalLock);
9761    if (VK_FALSE == skipCall)
9762        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9763}
9764
9765VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9766    VkBool32 skipCall = VK_FALSE;
9767    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9768    loader_platform_thread_lock_mutex(&globalLock);
9769#if MTMERGE
9770    auto cb_data = dev_data->cbMap.find(commandBuffer);
9771    if (cb_data != dev_data->cbMap.end()) {
9772        auto pass_data = dev_data->passMap.find(cb_data->second.pass);
9773        if (pass_data != dev_data->passMap.end()) {
9774            MT_PASS_INFO &pass_info = pass_data->second;
9775            for (size_t i = 0; i < pass_info.attachments.size(); ++i) {
9776                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->fbMap[pass_info.fb].attachments[i];
9777                if (pass_info.attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9778                    if (cb_data != dev_data->cbMap.end()) {
9779                        std::function<VkBool32()> function = [=]() {
9780                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9781                            return VK_FALSE;
9782                        };
9783                        cb_data->second.validate_functions.push_back(function);
9784                    }
9785                } else if (pass_info.attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9786                    if (cb_data != dev_data->cbMap.end()) {
9787                        std::function<VkBool32()> function = [=]() {
9788                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9789                            return VK_FALSE;
9790                        };
9791                        cb_data->second.validate_functions.push_back(function);
9792                    }
9793                }
9794            }
9795        }
9796    }
9797#endif
9798    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9799    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
9800    if (pCB) {
9801        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9802        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9803        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9804        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9805        pCB->activeRenderPass = 0;
9806        pCB->activeSubpass = 0;
9807    }
9808    loader_platform_thread_unlock_mutex(&globalLock);
9809    if (VK_FALSE == skipCall)
9810        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9811}
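
// Illustrative sketch: the store-op handling above is driven by what the
// application declared per attachment. One hypothetical attachment:
//
//     VkAttachmentDescription color_attach = {};
//     color_attach.format = VK_FORMAT_B8G8R8A8_UNORM;
//     color_attach.samples = VK_SAMPLE_COUNT_1_BIT;
//     color_attach.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     color_attach.storeOp = VK_ATTACHMENT_STORE_OP_STORE;      // memory marked valid at vkCmdEndRenderPass
//     // VK_ATTACHMENT_STORE_OP_DONT_CARE would mark it invalid instead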
9812
9813bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9814                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
9815    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9816                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9817                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9818                   " that is not compatible with the current render pass %" PRIx64 ". "
9819                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9820                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9821                   msg);
9822}
9823
9824bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9825                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9826                                     uint32_t secondaryAttach, bool is_multi) {
9827    bool skip_call = false;
9828    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9829    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9830    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9831        primaryAttach = VK_ATTACHMENT_UNUSED;
9832    }
9833    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9834        secondaryAttach = VK_ATTACHMENT_UNUSED;
9835    }
9836    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9837        return skip_call;
9838    }
9839    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9840        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9841                                                 secondaryAttach, "The first is unused while the second is not.");
9842        return skip_call;
9843    }
9844    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9845        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9846                                                 secondaryAttach, "The second is unused while the first is not.");
9847        return skip_call;
9848    }
9849    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9850        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9851        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9852                                                 secondaryAttach, "They have different formats.");
9853    }
9854    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9855        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9856        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9857                                                 secondaryAttach, "They have different samples.");
9858    }
9859    if (is_multi &&
9860        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9861            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9862        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9863                                                 secondaryAttach, "They have different flags.");
9864    }
9865    return skip_call;
9866}
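
// Illustrative sketch: two attachments that would trip the "different samples"
// path above even though their formats match.
//
//     VkAttachmentDescription a = {}, b = {};
//     a.format = b.format = VK_FORMAT_R8G8B8A8_UNORM;
//     a.samples = VK_SAMPLE_COUNT_1_BIT;
//     b.samples = VK_SAMPLE_COUNT_4_BIT; // incompatible: logInvalidAttachmentMessage() fires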
9867
9868bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9869                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
9870    bool skip_call = false;
9871    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9872    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9873    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9874    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9875    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9876    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9877        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9878        if (i < primary_desc.inputAttachmentCount) {
9879            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9880        }
9881        if (i < secondary_desc.inputAttachmentCount) {
9882            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9883        }
9884        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9885                                                     secondaryPass, secondary_input_attach, is_multi);
9886    }
9887    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9888    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9889        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9890        if (i < primary_desc.colorAttachmentCount) {
9891            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9892        }
9893        if (i < secondary_desc.colorAttachmentCount) {
9894            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9895        }
9896        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9897                                                     secondaryPass, secondary_color_attach, is_multi);
9898        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9899        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9900            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9901        }
9902        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9903            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9904        }
9905        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9906                                                     secondaryPass, secondary_resolve_attach, is_multi);
9907    }
9908    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9909    if (primary_desc.pDepthStencilAttachment) {
9910        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9911    }
9912    if (secondary_desc.pDepthStencilAttachment) {
9913        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9914    }
9915    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9916                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9917    return skip_call;
9918}
9919
9920bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9921                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9922    bool skip_call = false;
9923    // Early exit if renderPass objects are identical (and therefore compatible)
9924    if (primaryPass == secondaryPass)
9925        return skip_call;
9926    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9927    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9928    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9929        skip_call |=
9930            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9931                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9932                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9933                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9934        return skip_call;
9935    }
9936    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9937        skip_call |=
9938            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9939                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9940                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9941                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9942        return skip_call;
9943    }
9944    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9945        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9946                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9947                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9948                             " that is not compatible with the current render pass %" PRIx64 ". "
9949                             "They have a different number of subpasses.",
9950                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9951        return skip_call;
9952    }
9953    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9954    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9955        skip_call |=
9956            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9957    }
9958    return skip_call;
9959}
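
// Illustrative sketch: the subpass-count check above rejects two passes that
// differ only in how many subpasses they declare.
//
//     VkRenderPassCreateInfo rp_a = {}, rp_b = {};
//     rp_a.sType = rp_b.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
//     rp_a.subpassCount = 1;
//     rp_b.subpassCount = 2; // reported: "They have a different number of subpasses."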
9960
9961bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9962                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9963    bool skip_call = false;
9964    if (!pSubCB->beginInfo.pInheritanceInfo) {
9965        return skip_call;
9966    }
9967    VkFramebuffer primary_fb = pCB->framebuffer;
9968    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9969    if (secondary_fb != VK_NULL_HANDLE) {
9970        if (primary_fb != secondary_fb) {
9971            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9972                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9973                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
9974                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
9975                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9976        }
9977        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
9978        if (fb_data == dev_data->frameBufferMap.end()) {
9979            skip_call |=
9980                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9981                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9982                                                                          "which has invalid framebuffer %" PRIx64 ".",
9983                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9984            return skip_call;
9985        }
9986        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
9987                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9988    }
9989    return skip_call;
9990}
9991
9992bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9993    bool skipCall = false;
9994    unordered_set<int> activeTypes;
9995    for (auto queryObject : pCB->activeQueries) {
9996        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9997        if (queryPoolData != dev_data->queryPoolMap.end()) {
9998            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9999                pSubCB->beginInfo.pInheritanceInfo) {
10000                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10001                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10002                    skipCall |= log_msg(
10003                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10004                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10005                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
10006                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried, so the command "
10007                        "buffer must not enable any pipeline statistics bit that is not also set on the queryPool.",
10008                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10009                }
10010            }
10011            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10012        }
10013    }
10014    for (auto queryObject : pSubCB->startedQueries) {
10015        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10016        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10017            skipCall |=
10018                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10019                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10020                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
10021                        "which has invalid active query pool %" PRIx64 " of type %d, but a query of that type has been started on "
10022                        "secondary Cmd Buffer %p.",
10023                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10024                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10025        }
10026    }
10027    return skipCall;
10028}
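
// Illustrative sketch: when a PIPELINE_STATISTICS query is active on the
// primary, the secondary's inheritance info may only enable statistics bits
// that the pool itself was created with. Values here are assumptions.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT;
//     // OK if the pool has that bit set; any extra bit triggers the error above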
10029
10030VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10031vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10032    VkBool32 skipCall = VK_FALSE;
10033    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10034    loader_platform_thread_lock_mutex(&globalLock);
10035    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10036    if (pCB) {
10037        GLOBAL_CB_NODE *pSubCB = NULL;
10038        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10039            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10040            if (!pSubCB) {
10041                skipCall |=
10042                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10043                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10044                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
10045                            (void *)pCommandBuffers[i], i);
10046            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10047                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10048                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10049                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
10050                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
10051                                    (void *)pCommandBuffers[i], i);
10052            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10053                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10054                    skipCall |= log_msg(
10055                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10056                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10057                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
10058                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10059                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
10060                } else {
10061                    // Make sure render pass is compatible with parent command buffer pass if it has the continue bit set
10062                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
10063                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
10064                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10065                }
10066                string errorString = "";
10067                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
10068                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
10069                    skipCall |= log_msg(
10070                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10071                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10072                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
10073                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
10074                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10075                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
10076                }
10077                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
10078                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
10079                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
10080                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
10081                        skipCall |= log_msg(
10082                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10083                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10084                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
10085                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
10086                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
10087                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
10088                    }
10089                }
10090            }
10091            // TODO(mlentine): Move more logic into this method
10092            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10093            skipCall |= validateCommandBufferState(dev_data, pSubCB);
10094            // Secondary cmdBuffers are considered pending execution from the
10095            // moment they are recorded
10096            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10097                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10098                    skipCall |= log_msg(
10099                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10100                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10101                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10102                        "set!",
10103                        (uint64_t)(pCB->commandBuffer));
10104                }
10105                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10106                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10107                    skipCall |= log_msg(
10108                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10109                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10110                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
10111                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10112                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10113                                          "set, even though it does.",
10114                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10115                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10116                }
10117            }
10118            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
10119                skipCall |=
10120                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10121                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10122                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10123                            "(%#" PRIxLEAST64 ") cannot be executed while a query is in "
10124                            "flight because inherited queries are not "
10125                            "supported on this device.",
10126                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10127            }
10128            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10129            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10130            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10131        }
10132        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
10133        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10134    }
10135    loader_platform_thread_unlock_mutex(&globalLock);
10136    if (VK_FALSE == skipCall)
10137        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10138}
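
// Illustrative sketch: recording a secondary command buffer so that it passes
// the checks above when executed inside a render pass. All handles are assumed.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;  // must be compatible with the active pass
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer; // optional; if set, must match the active framebuffer
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);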
10139
10140VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
10141    VkBool32 skip_call = VK_FALSE;
10142    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10143    auto mem_data = dev_data->memObjMap.find(mem);
10144    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
10145        std::vector<VkImageLayout> layouts;
10146        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
10147            for (auto layout : layouts) {
10148                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10149                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10150                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10151                                                                                         "GENERAL or PREINITIALIZED are supported.",
10152                                         string_VkImageLayout(layout));
10153                }
10154            }
10155        }
10156    }
10157    return skip_call;
10158}
10159
10160VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10161vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10162    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10163
10164    VkBool32 skip_call = VK_FALSE;
10165    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10166    loader_platform_thread_lock_mutex(&globalLock);
10167#if MTMERGE
10168    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
10169    if (pMemObj) {
10170        pMemObj->valid = true;
10171        if ((dev_data->memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { // memProps assumed cached on dev_data at device creation
10172            skip_call =
10173                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10174                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10175                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
10176        }
10177    }
10178    skip_call |= validateMemRange(dev_data, mem, offset, size);
10179    storeMemRanges(dev_data, mem, offset, size);
10180#endif
10181    skip_call |= ValidateMapImageLayouts(device, mem);
10182    loader_platform_thread_unlock_mutex(&globalLock);
10183
10184    if (VK_FALSE == skip_call) {
10185        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
10186#if MTMERGE
10187        initializeAndTrackMemory(dev_data, mem, size, ppData);
10188#endif
10189    }
10190    return result;
10191}
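
// Illustrative sketch: a map/write/unmap sequence that satisfies the checks
// above. The memory, src_data, and data_size are assumptions; the memory is
// assumed to come from a HOST_VISIBLE memory type.
//
//     void *ptr = NULL;
//     if (VK_SUCCESS == vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr)) {
//         memcpy(ptr, src_data, (size_t)data_size);
//         vkUnmapMemory(device, mem);
//     }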
10192
10193#if MTMERGE
10194VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
10195    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10196    VkBool32 skipCall = VK_FALSE;
10197
10198    loader_platform_thread_lock_mutex(&globalLock);
10199    skipCall |= deleteMemRanges(my_data, mem);
10200    loader_platform_thread_unlock_mutex(&globalLock);
10201    if (VK_FALSE == skipCall) {
10202        my_data->device_dispatch_table->UnmapMemory(device, mem);
10203    }
10204}
10205
10206VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10207                                const VkMappedMemoryRange *pMemRanges) {
10208    VkBool32 skipCall = VK_FALSE;
10209    for (uint32_t i = 0; i < memRangeCount; ++i) {
10210        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10211        if (mem_element != my_data->memObjMap.end()) {
10212            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
10213                skipCall |= log_msg(
10214                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10215                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10216                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10217                    "(" PRINTF_SIZE_T_SPECIFIER ").",
10218                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
10219            }
10220            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
10221                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
10222                 (pMemRanges[i].offset + pMemRanges[i].size))) {
10223                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10224                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10225                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10226                                                                 ") exceeds the Memory Object's upper-bound "
10227                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
10228                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10229                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
10230            }
10231        }
10232    }
10233    return skipCall;
10234}
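
// Illustrative sketch: a flush range that stays inside the region passed to
// vkMapMemory and therefore passes the offset/upper-bound checks above.
// map_offset and map_size are assumptions for the example.
//
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = mem;
//     range.offset = map_offset;  // >= the offset given to vkMapMemory
//     range.size = map_size;      // must not extend past the end of the mapping
//     vkFlushMappedMemoryRanges(device, 1, &range);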
10235
10236VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10237                                                  const VkMappedMemoryRange *pMemRanges) {
10238    VkBool32 skipCall = VK_FALSE;
10239    for (uint32_t i = 0; i < memRangeCount; ++i) {
10240        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10241        if (mem_element != my_data->memObjMap.end()) {
10242            if (mem_element->second.pData) {
10243                VkDeviceSize size = mem_element->second.memRange.size;
10244                VkDeviceSize half_size = (size / 2);
10245                char *data = static_cast<char *>(mem_element->second.pData);
10246                for (VkDeviceSize j = 0; j < half_size; ++j) {
10247                    if (data[j] != NoncoherentMemoryFillValue) {
10248                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10249                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10250                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10251                                            (uint64_t)pMemRanges[i].memory);
10252                    }
10253                }
10254                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
10255                    if (data[j] != NoncoherentMemoryFillValue) {
10256                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10257                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10258                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10259                                            (uint64_t)pMemRanges[i].memory);
10260                    }
10261                }
10262                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
10263            }
10264        }
10265    }
10266    return skipCall;
10267}
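
// Note on the loops above (assuming the shadow buffer set up by
// initializeAndTrackMemory): pData points at a 2 * size allocation where the
// application's mapping occupies the middle and the outer halves are filled
// with NoncoherentMemoryFillValue as guard bands:
//
//     [0, half_size)                guard band (first loop)
//     [half_size, half_size + size) application data (memcpy'd to pDriverData)
//     [half_size + size, 2 * size)  guard band (second loop)
//
// Any overwritten fill byte means the app wrote outside its mapped range.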
10268
10269VK_LAYER_EXPORT VkResult VKAPI_CALL
10270vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10271    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10272    VkBool32 skipCall = VK_FALSE;
10273    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10274
10275    loader_platform_thread_lock_mutex(&globalLock);
10276    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10277    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10278    loader_platform_thread_unlock_mutex(&globalLock);
10279    if (VK_FALSE == skipCall) {
10280        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10281    }
10282    return result;
10283}
10284
10285VK_LAYER_EXPORT VkResult VKAPI_CALL
10286vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10287    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10288    VkBool32 skipCall = VK_FALSE;
10289    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10290
10291    loader_platform_thread_lock_mutex(&globalLock);
10292    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10293    loader_platform_thread_unlock_mutex(&globalLock);
10294    if (VK_FALSE == skipCall) {
10295        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10296    }
10297    return result;
10298}
10299#endif
10300
10301VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10302    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10303    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE; // declared outside MTMERGE so the skip check below compiles when merging is disabled
10304#if MTMERGE
10305    loader_platform_thread_lock_mutex(&globalLock);
10306    // Track objects tied to memory
10307    uint64_t image_handle = (uint64_t)(image);
10308    skipCall |=
10309        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10310    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
10311    {
10312        VkMemoryRequirements memRequirements;
10313        vkGetImageMemoryRequirements(device, image, &memRequirements);
10314        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
10315                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
10316                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
10317    }
10318    print_mem_list(dev_data, device);
10319    loader_platform_thread_unlock_mutex(&globalLock);
10320#endif
10321    if (VK_FALSE == skipCall) {
10322        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10323        VkMemoryRequirements memRequirements;
10324        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10325        loader_platform_thread_lock_mutex(&globalLock);
10326        dev_data->memObjMap[mem].image = image;
10327        dev_data->imageMap[image].mem = mem;
10328        dev_data->imageMap[image].memOffset = memoryOffset;
10329        dev_data->imageMap[image].memSize = memRequirements.size;
10330        loader_platform_thread_unlock_mutex(&globalLock);
10331    }
10332    return result;
10333}
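
// Illustrative sketch: binding at an offset that honors the driver-reported
// alignment, the same requirements the aliasing check above inspects.
// base_offset and mem are assumptions for the example.
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     VkDeviceSize offset = (base_offset + reqs.alignment - 1) & ~(reqs.alignment - 1);
//     vkBindImageMemory(device, image, mem, offset);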
10334
10335VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
10336    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10337    loader_platform_thread_lock_mutex(&globalLock);
10338    dev_data->eventMap[event].needsSignaled = false;
10339    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10340    loader_platform_thread_unlock_mutex(&globalLock);
10341    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
10342    return result;
10343}
10344
10345VKAPI_ATTR VkResult VKAPI_CALL
10346vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10347    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10348    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10349    VkBool32 skip_call = VK_FALSE;
10350#if MTMERGE
10351    //MTMTODO : Merge this code with the checks below
10352    loader_platform_thread_lock_mutex(&globalLock);
10353
10354    for (uint32_t i = 0; i < bindInfoCount; i++) {
10355        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
10356        // Track objects tied to memory
10357        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
10358            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
10359                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
10360                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10361                                           "vkQueueBindSparse"))
10362                    skip_call = VK_TRUE;
10363            }
10364        }
10365        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
10366            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
10367                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
10368                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10369                                           "vkQueueBindSparse"))
10370                    skip_call = VK_TRUE;
10371            }
10372        }
10373        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
10374            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
10375                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
10376                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10377                                           "vkQueueBindSparse"))
10378                    skip_call = VK_TRUE;
10379            }
10380        }
10381        // Validate semaphore state
10382        for (uint32_t j = 0; j < bindInfo->waitSemaphoreCount; j++) { // 'j' avoids shadowing the outer loop's 'i'
10383            VkSemaphore sem = bindInfo->pWaitSemaphores[j];
10384
10385            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10386                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
10387                    skip_call =
10388                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10389                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10390                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
10391                }
10392                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
10393            }
10394        }
10395        for (uint32_t j = 0; j < bindInfo->signalSemaphoreCount; j++) { // 'j' avoids shadowing the outer loop's 'i'
10396            VkSemaphore sem = bindInfo->pSignalSemaphores[j];
10397
10398            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10399                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10400                    skip_call =
10401                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10402                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10403                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
10404                }
10405                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10406            }
10407        }
10408    }
10409
10410    print_mem_list(dev_data, queue);
10411    loader_platform_thread_unlock_mutex(&globalLock);
10412#endif
10413    loader_platform_thread_lock_mutex(&globalLock);
10414    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10415        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10416        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10417            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
10418                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
10419            } else {
10420                skip_call |=
10421                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10422                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10423                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10424                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
10425            }
10426        }
10427        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10428            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
10429        }
10430    }
10431    loader_platform_thread_unlock_mutex(&globalLock);
10432
10433    if (VK_FALSE == skip_call)
10434        result = dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10435#if MTMERGE
10436    // Update semaphore state
10437    loader_platform_thread_lock_mutex(&globalLock);
10438    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
10439        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
10440        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
10441            VkSemaphore sem = bindInfo->pWaitSemaphores[i];
10442
10443            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10444                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10445            }
10446        }
10447    }
10448    loader_platform_thread_unlock_mutex(&globalLock);
10449#endif
10450
10451    return result;
10452}
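
// Illustrative sketch: a minimal opaque image bind, the shape consumed by the
// pImageOpaqueBinds loop above. Handles and sizes are assumptions.
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = reqs.size;      // from vkGetImageMemoryRequirements
//     bind.memory = mem;
//     bind.memoryOffset = 0;
//     VkSparseImageOpaqueMemoryBindInfo opaque = {};
//     opaque.image = sparse_image;
//     opaque.bindCount = 1;
//     opaque.pBinds = &bind;
//     VkBindSparseInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     info.imageOpaqueBindCount = 1;
//     info.pImageOpaqueBinds = &opaque;
//     vkQueueBindSparse(queue, 1, &info, VK_NULL_HANDLE);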
10453
10454VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10455                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10456    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10457    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10458    if (result == VK_SUCCESS) {
10459        loader_platform_thread_lock_mutex(&globalLock);
10460        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10461        sNode->signaled = 0;
10462        sNode->queue = VK_NULL_HANDLE;
10463        sNode->in_use.store(0);
10464        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10465        loader_platform_thread_unlock_mutex(&globalLock);
10466    }
10467    return result;
10468}
10469
10470VKAPI_ATTR VkResult VKAPI_CALL
10471vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10472    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10473    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10474    if (result == VK_SUCCESS) {
10475        loader_platform_thread_lock_mutex(&globalLock);
10476        dev_data->eventMap[*pEvent].needsSignaled = false;
10477        dev_data->eventMap[*pEvent].in_use.store(0);
10478        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10479        loader_platform_thread_unlock_mutex(&globalLock);
10480    }
10481    return result;
10482}
10483
10484VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10485                                                                    const VkAllocationCallbacks *pAllocator,
10486                                                                    VkSwapchainKHR *pSwapchain) {
10487    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10488    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10489
10490    if (VK_SUCCESS == result) {
10491        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10492        loader_platform_thread_lock_mutex(&globalLock);
10493        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10494        loader_platform_thread_unlock_mutex(&globalLock);
10495    }
10496
10497    return result;
10498}
10499
10500VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10501vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10502    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10503    bool skipCall = false;
10504
10505    loader_platform_thread_lock_mutex(&globalLock);
10506    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10507    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10508        if (swapchain_data->second->images.size() > 0) {
10509            for (auto swapchain_image : swapchain_data->second->images) {
10510                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10511                if (image_sub != dev_data->imageSubresourceMap.end()) {
10512                    for (auto imgsubpair : image_sub->second) {
10513                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10514                        if (image_item != dev_data->imageLayoutMap.end()) {
10515                            dev_data->imageLayoutMap.erase(image_item);
10516                        }
10517                    }
10518                    dev_data->imageSubresourceMap.erase(image_sub);
10519                }
10520                skipCall |= clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
10521                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10522                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
10523            }
10524        }
10525        delete swapchain_data->second;
10526        dev_data->device_extensions.swapchainMap.erase(swapchain);
10527    }
10528    loader_platform_thread_unlock_mutex(&globalLock);
10529    if (!skipCall)
10530        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10531}
10532
10533VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10534vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10535    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10536    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10537
10538    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10539        // This should never happen and is checked by param checker.
10540        if (!pCount)
10541            return result;
10542        loader_platform_thread_lock_mutex(&globalLock);
10543        const size_t count = *pCount;
10544        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10545        if (!swapchain_node->images.empty()) {
10546            // TODO : Not sure I like the memcmp here, but it works
10547            const bool mismatch = (swapchain_node->images.size() != count ||
10548                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10549            if (mismatch) {
10550                // TODO: Verify against Valid Usage section of extension
10551                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10552                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10553                        "vkGetSwapchainImagesKHR(%" PRIu64
10554                        ") returned mismatching image data for swapchain",
10555                        (uint64_t)(swapchain));
10556            }
10557        }
10558        for (uint32_t i = 0; i < *pCount; ++i) {
10559            IMAGE_LAYOUT_NODE image_layout_node;
10560            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10561            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10562            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
10563            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10564            swapchain_node->images.push_back(pSwapchainImages[i]);
10565            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10566            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10567            dev_data->imageLayoutMap[subpair] = image_layout_node;
10568            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10569        }
10570        if (!swapchain_node->images.empty()) {
10571            for (auto image : swapchain_node->images) {
10572                // Add image object binding, then insert the new Mem Object and then bind it to created image
10573                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10574                                       &swapchain_node->createInfo);
10575            }
10576        }
10577        loader_platform_thread_unlock_mutex(&globalLock);
10578    }
10579    return result;
10580}
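
// Illustrative sketch: the standard two-call idiom that drives this entry
// point; the first call queries the count, the second fills the array.
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());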
10581
10582VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10583    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10584    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10585    bool skip_call = false;
10586
10587    if (pPresentInfo) {
10588        loader_platform_thread_lock_mutex(&globalLock);
10589        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10590            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
10591                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
10592            } else {
10593                skip_call |=
10594                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10595                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10596                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10597                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
10598            }
10599        }
10600        VkDeviceMemory mem = VK_NULL_HANDLE; // initialized defensively in case the binding lookup below fails
10601        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10602            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
10603            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
10604                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
10605                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
10606                skip_call |=
10607                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10608                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10609                vector<VkImageLayout> layouts;
10610                if (FindLayouts(dev_data, image, layouts)) {
10611                    for (auto layout : layouts) {
10612                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10613                            skip_call |=
10614                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10615                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10616                                        "Images passed to present must be in layout "
10617                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s",
10618                                        string_VkImageLayout(layout));
10619                        }
10620                    }
10621                }
10622            }
10623        }
10624        loader_platform_thread_unlock_mutex(&globalLock);
10625    }
10626
10627    if (!skip_call)
10628        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10629#if MTMERGE
10630    loader_platform_thread_lock_mutex(&globalLock);
10631    for (uint32_t i = 0; pPresentInfo && (i < pPresentInfo->waitSemaphoreCount); i++) { // guard matches the NULL check above
10632        VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
10633        if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10634            dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10635        }
10636    }
10637    loader_platform_thread_unlock_mutex(&globalLock);
10638#endif
10639    return result;
10640}
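
// Illustrative sketch: transitioning a swapchain image to PRESENT_SRC_KHR
// before presenting, which is what the layout check above enforces. The stage
// and access masks shown are one reasonable choice, not the only one.
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = swapchain_image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &barrier);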
10641
10642VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10643                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10644    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10645    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10646    bool skipCall = false;
10647#if MTMERGE
10648    loader_platform_thread_lock_mutex(&globalLock);
10649    if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10650        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10651            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10652                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10653                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10654        }
10655        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10656    }
10657    auto fence_data = dev_data->fenceMap.find(fence);
10658    if (fence_data != dev_data->fenceMap.end()) {
10659        fence_data->second.swapchain = swapchain;
10660    }
10661    loader_platform_thread_unlock_mutex(&globalLock);
10662#endif
10663    if (!skipCall) {
10664        result =
10665            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10666    }
10667    loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add tracking code for the "fence" parameter
    // Only mark the semaphore as signaled if the acquire actually reached the driver and succeeded
    if (result == VK_SUCCESS)
        dev_data->semaphoreMap[semaphore].signaled = 1;
10670    loader_platform_thread_unlock_mutex(&globalLock);
10671    return result;
10672}
10673
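// Debug report callback creation is two-step: create the callback down the chain first, then, on
// success, register it with this layer's own report_data so messages the layer generates itself
// reach the new callback too.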
10674VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10675vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10676                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10677    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10678    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10679    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10680    if (VK_SUCCESS == res) {
10681        loader_platform_thread_lock_mutex(&globalLock);
10682        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10683        loader_platform_thread_unlock_mutex(&globalLock);
10684    }
10685    return res;
10686}
10687
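// Destruction is symmetric: destroy the down-chain callback, then unregister it from the layer's
// report_data under the global lock.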
10688VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
10689                                                                           VkDebugReportCallbackEXT msgCallback,
10690                                                                           const VkAllocationCallbacks *pAllocator) {
10691    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10692    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10693    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10694    loader_platform_thread_lock_mutex(&globalLock);
10695    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10696    loader_platform_thread_unlock_mutex(&globalLock);
10697}
10698
10699VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10700vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10701                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10702    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10703    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10704                                                            pMsg);
10705}
10706
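// vkGetDeviceProcAddr: the loader and any layers above resolve device-level entry points through
// this function. Names this layer intercepts return the layer's own trampoline; anything else is
// forwarded down the chain at the end of the function.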
10707VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10708    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10709        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10710    if (!strcmp(funcName, "vkDestroyDevice"))
10711        return (PFN_vkVoidFunction)vkDestroyDevice;
10712    if (!strcmp(funcName, "vkQueueSubmit"))
10713        return (PFN_vkVoidFunction)vkQueueSubmit;
10714    if (!strcmp(funcName, "vkWaitForFences"))
10715        return (PFN_vkVoidFunction)vkWaitForFences;
10716    if (!strcmp(funcName, "vkGetFenceStatus"))
10717        return (PFN_vkVoidFunction)vkGetFenceStatus;
10718    if (!strcmp(funcName, "vkQueueWaitIdle"))
10719        return (PFN_vkVoidFunction)vkQueueWaitIdle;
10720    if (!strcmp(funcName, "vkDeviceWaitIdle"))
10721        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
10722    if (!strcmp(funcName, "vkGetDeviceQueue"))
10723        return (PFN_vkVoidFunction)vkGetDeviceQueue;
10724    if (!strcmp(funcName, "vkDestroyInstance"))
10725        return (PFN_vkVoidFunction)vkDestroyInstance;
10728    if (!strcmp(funcName, "vkDestroyFence"))
10729        return (PFN_vkVoidFunction)vkDestroyFence;
10730    if (!strcmp(funcName, "vkResetFences"))
10731        return (PFN_vkVoidFunction)vkResetFences;
10732    if (!strcmp(funcName, "vkDestroySemaphore"))
10733        return (PFN_vkVoidFunction)vkDestroySemaphore;
10734    if (!strcmp(funcName, "vkDestroyEvent"))
10735        return (PFN_vkVoidFunction)vkDestroyEvent;
10736    if (!strcmp(funcName, "vkDestroyQueryPool"))
10737        return (PFN_vkVoidFunction)vkDestroyQueryPool;
10738    if (!strcmp(funcName, "vkDestroyBuffer"))
10739        return (PFN_vkVoidFunction)vkDestroyBuffer;
10740    if (!strcmp(funcName, "vkDestroyBufferView"))
10741        return (PFN_vkVoidFunction)vkDestroyBufferView;
10742    if (!strcmp(funcName, "vkDestroyImage"))
10743        return (PFN_vkVoidFunction)vkDestroyImage;
10744    if (!strcmp(funcName, "vkDestroyImageView"))
10745        return (PFN_vkVoidFunction)vkDestroyImageView;
10746    if (!strcmp(funcName, "vkDestroyShaderModule"))
10747        return (PFN_vkVoidFunction)vkDestroyShaderModule;
10748    if (!strcmp(funcName, "vkDestroyPipeline"))
10749        return (PFN_vkVoidFunction)vkDestroyPipeline;
10750    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
10751        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
10752    if (!strcmp(funcName, "vkDestroySampler"))
10753        return (PFN_vkVoidFunction)vkDestroySampler;
10754    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
10755        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
10756    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
10757        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
10758    if (!strcmp(funcName, "vkDestroyFramebuffer"))
10759        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
10760    if (!strcmp(funcName, "vkDestroyRenderPass"))
10761        return (PFN_vkVoidFunction)vkDestroyRenderPass;
10762    if (!strcmp(funcName, "vkCreateBuffer"))
10763        return (PFN_vkVoidFunction)vkCreateBuffer;
10764    if (!strcmp(funcName, "vkCreateBufferView"))
10765        return (PFN_vkVoidFunction)vkCreateBufferView;
10766    if (!strcmp(funcName, "vkCreateImage"))
10767        return (PFN_vkVoidFunction)vkCreateImage;
10768    if (!strcmp(funcName, "vkCreateImageView"))
10769        return (PFN_vkVoidFunction)vkCreateImageView;
10770    if (!strcmp(funcName, "vkCreateFence"))
10771        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
10780    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
10781        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
10782    if (!strcmp(funcName, "vkCreateComputePipelines"))
10783        return (PFN_vkVoidFunction)vkCreateComputePipelines;
10784    if (!strcmp(funcName, "vkCreateSampler"))
10785        return (PFN_vkVoidFunction)vkCreateSampler;
10786    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
10787        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
10788    if (!strcmp(funcName, "vkCreatePipelineLayout"))
10789        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
10790    if (!strcmp(funcName, "vkCreateDescriptorPool"))
10791        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
10792    if (!strcmp(funcName, "vkResetDescriptorPool"))
10793        return (PFN_vkVoidFunction)vkResetDescriptorPool;
10794    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
10795        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
10796    if (!strcmp(funcName, "vkFreeDescriptorSets"))
10797        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
10798    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
10799        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
10800    if (!strcmp(funcName, "vkCreateCommandPool"))
10801        return (PFN_vkVoidFunction)vkCreateCommandPool;
10802    if (!strcmp(funcName, "vkDestroyCommandPool"))
10803        return (PFN_vkVoidFunction)vkDestroyCommandPool;
10804    if (!strcmp(funcName, "vkResetCommandPool"))
10805        return (PFN_vkVoidFunction)vkResetCommandPool;
10806    if (!strcmp(funcName, "vkCreateQueryPool"))
10807        return (PFN_vkVoidFunction)vkCreateQueryPool;
10808    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
10809        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
10810    if (!strcmp(funcName, "vkFreeCommandBuffers"))
10811        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
10812    if (!strcmp(funcName, "vkBeginCommandBuffer"))
10813        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
10814    if (!strcmp(funcName, "vkEndCommandBuffer"))
10815        return (PFN_vkVoidFunction)vkEndCommandBuffer;
10816    if (!strcmp(funcName, "vkResetCommandBuffer"))
10817        return (PFN_vkVoidFunction)vkResetCommandBuffer;
10818    if (!strcmp(funcName, "vkCmdBindPipeline"))
10819        return (PFN_vkVoidFunction)vkCmdBindPipeline;
10820    if (!strcmp(funcName, "vkCmdSetViewport"))
10821        return (PFN_vkVoidFunction)vkCmdSetViewport;
10822    if (!strcmp(funcName, "vkCmdSetScissor"))
10823        return (PFN_vkVoidFunction)vkCmdSetScissor;
10824    if (!strcmp(funcName, "vkCmdSetLineWidth"))
10825        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
10826    if (!strcmp(funcName, "vkCmdSetDepthBias"))
10827        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
10828    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
10829        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
10830    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
10831        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
10832    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
10833        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
10834    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
10835        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
10836    if (!strcmp(funcName, "vkCmdSetStencilReference"))
10837        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
10838    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
10839        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
10840    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
10841        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
10842    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
10843        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
10844    if (!strcmp(funcName, "vkCmdDraw"))
10845        return (PFN_vkVoidFunction)vkCmdDraw;
10846    if (!strcmp(funcName, "vkCmdDrawIndexed"))
10847        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
10848    if (!strcmp(funcName, "vkCmdDrawIndirect"))
10849        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
10850    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
10851        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
10852    if (!strcmp(funcName, "vkCmdDispatch"))
10853        return (PFN_vkVoidFunction)vkCmdDispatch;
10854    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
10855        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
10856    if (!strcmp(funcName, "vkCmdCopyBuffer"))
10857        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
10858    if (!strcmp(funcName, "vkCmdCopyImage"))
10859        return (PFN_vkVoidFunction)vkCmdCopyImage;
10860    if (!strcmp(funcName, "vkCmdBlitImage"))
10861        return (PFN_vkVoidFunction)vkCmdBlitImage;
10862    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
10863        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
10864    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
10865        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
10866    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
10867        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
10868    if (!strcmp(funcName, "vkCmdFillBuffer"))
10869        return (PFN_vkVoidFunction)vkCmdFillBuffer;
10870    if (!strcmp(funcName, "vkCmdClearColorImage"))
10871        return (PFN_vkVoidFunction)vkCmdClearColorImage;
10872    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
10873        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
10874    if (!strcmp(funcName, "vkCmdClearAttachments"))
10875        return (PFN_vkVoidFunction)vkCmdClearAttachments;
10876    if (!strcmp(funcName, "vkCmdResolveImage"))
10877        return (PFN_vkVoidFunction)vkCmdResolveImage;
10878    if (!strcmp(funcName, "vkCmdSetEvent"))
10879        return (PFN_vkVoidFunction)vkCmdSetEvent;
10880    if (!strcmp(funcName, "vkCmdResetEvent"))
10881        return (PFN_vkVoidFunction)vkCmdResetEvent;
10882    if (!strcmp(funcName, "vkCmdWaitEvents"))
10883        return (PFN_vkVoidFunction)vkCmdWaitEvents;
10884    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
10885        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
10886    if (!strcmp(funcName, "vkCmdBeginQuery"))
10887        return (PFN_vkVoidFunction)vkCmdBeginQuery;
10888    if (!strcmp(funcName, "vkCmdEndQuery"))
10889        return (PFN_vkVoidFunction)vkCmdEndQuery;
10890    if (!strcmp(funcName, "vkCmdResetQueryPool"))
10891        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
10892    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
10893        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
10894    if (!strcmp(funcName, "vkCmdPushConstants"))
10895        return (PFN_vkVoidFunction)vkCmdPushConstants;
10896    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
10897        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
10898    if (!strcmp(funcName, "vkCreateFramebuffer"))
10899        return (PFN_vkVoidFunction)vkCreateFramebuffer;
10900    if (!strcmp(funcName, "vkCreateShaderModule"))
10901        return (PFN_vkVoidFunction)vkCreateShaderModule;
10902    if (!strcmp(funcName, "vkCreateRenderPass"))
10903        return (PFN_vkVoidFunction)vkCreateRenderPass;
10904    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
10905        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
10906    if (!strcmp(funcName, "vkCmdNextSubpass"))
10907        return (PFN_vkVoidFunction)vkCmdNextSubpass;
10908    if (!strcmp(funcName, "vkCmdEndRenderPass"))
10909        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
10910    if (!strcmp(funcName, "vkCmdExecuteCommands"))
10911        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
10912    if (!strcmp(funcName, "vkSetEvent"))
10913        return (PFN_vkVoidFunction)vkSetEvent;
10914    if (!strcmp(funcName, "vkMapMemory"))
10915        return (PFN_vkVoidFunction)vkMapMemory;
10916#if MTMERGE
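    // Memory-tracking entry points below are only intercepted when the merged mem_tracker
    // code is compiled in; otherwise these names fall through to the next layer.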
10917    if (!strcmp(funcName, "vkUnmapMemory"))
10918        return (PFN_vkVoidFunction)vkUnmapMemory;
10919    if (!strcmp(funcName, "vkAllocateMemory"))
10920        return (PFN_vkVoidFunction)vkAllocateMemory;
10921    if (!strcmp(funcName, "vkFreeMemory"))
10922        return (PFN_vkVoidFunction)vkFreeMemory;
10923    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
10924        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
10925    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
10926        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
10927    if (!strcmp(funcName, "vkBindBufferMemory"))
10928        return (PFN_vkVoidFunction)vkBindBufferMemory;
10929    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
10930        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
10931    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
10932        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
10933#endif
10934    if (!strcmp(funcName, "vkGetQueryPoolResults"))
10935        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
10936    if (!strcmp(funcName, "vkBindImageMemory"))
10937        return (PFN_vkVoidFunction)vkBindImageMemory;
10938    if (!strcmp(funcName, "vkQueueBindSparse"))
10939        return (PFN_vkVoidFunction)vkQueueBindSparse;
10940    if (!strcmp(funcName, "vkCreateSemaphore"))
10941        return (PFN_vkVoidFunction)vkCreateSemaphore;
10942    if (!strcmp(funcName, "vkCreateEvent"))
10943        return (PFN_vkVoidFunction)vkCreateEvent;
10944
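    // Everything below requires a valid device: without one there is no per-device layer state
    // or dispatch table to consult.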
10945    if (dev == NULL)
10946        return NULL;
10947
10948    layer_data *dev_data;
10949    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10950
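    // WSI entry points are exposed only if VK_KHR_swapchain was enabled at device-creation time.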
10951    if (dev_data->device_extensions.wsi_enabled) {
10952        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
10953            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
10954        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
10955            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
10956        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
10957            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
10958        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
10959            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
10960        if (!strcmp(funcName, "vkQueuePresentKHR"))
10961            return (PFN_vkVoidFunction)vkQueuePresentKHR;
10962    }
10963
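    // Unrecognized name: forward the query to the next layer (or the ICD) in the chain.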
    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
10970}
10971
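// vkGetInstanceProcAddr mirrors vkGetDeviceProcAddr for instance-level entry points: intercepted
// names return this layer's trampolines and unrecognized names are forwarded down the chain.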
10972VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10973    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
10974        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
10975    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10976        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10977    if (!strcmp(funcName, "vkCreateInstance"))
10978        return (PFN_vkVoidFunction)vkCreateInstance;
10979    if (!strcmp(funcName, "vkCreateDevice"))
10980        return (PFN_vkVoidFunction)vkCreateDevice;
10981    if (!strcmp(funcName, "vkDestroyInstance"))
10982        return (PFN_vkVoidFunction)vkDestroyInstance;
10983#if MTMERGE
10984    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
10985        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
10986#endif
10987    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
10988        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
10989    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
10990        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
10991    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
10992        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
10993    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
10994        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
10995
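    // The remaining lookups need a live instance: debug-report entry points come from the
    // layer's report_data, and anything else is passed down the instance chain.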
10996    if (instance == NULL)
10997        return NULL;
10998
10999    PFN_vkVoidFunction fptr;
11000
11001    layer_data *my_data;
11002    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
11003    fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
11004    if (fptr)
11005        return fptr;
11006
11007    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
11008    if (pTable->GetInstanceProcAddr == NULL)
11009        return NULL;
11010    return pTable->GetInstanceProcAddr(instance, funcName);
11011}
11012