core_validation.cpp revision 35cd175466f88e5a0a081b29c59c3246d5bf7f78
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <string>
#include <iostream>
#include <algorithm>
#include <list>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

#if MTMERGESOURCE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;
struct render_pass;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGESOURCE
// MTMERGESOURCE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    unordered_map<VkDescriptorSet, MT_DESCRIPTOR_SET_INFO> descriptorSetMap;
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
// MTMERGESOURCE - End of MT stuff
#endif
    devExts device_extensions;
    vector<VkQueue> queues; // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
// MTMERGESOURCE - added a couple of fields to constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGESOURCE
        currentFenceId(1),
#endif
        device_extensions(){};
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}
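
// Usage sketch (illustrative only, not part of the layer): ValidateLayerOrdering is
// intended to be handed a VkInstanceCreateInfo or VkDeviceCreateInfo, e.g.:
//
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
//     VkInstanceCreateInfo ci = {};
//     ci.enabledLayerCount = 2;
//     ci.ppEnabledLayerNames = layers;
//     ValidateLayerOrdering(ci); // fine: unique_objects follows core_validation
//
// Reversing the two layer names would emit the console warning above.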

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
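
// Each instruction's first word packs the word count in its high 16 bits and the
// opcode in its low 16 bits, which is what len() and opcode() decode. Illustrative
// example: the word 0x00040015 begins a 4-word OpTypeInt instruction
// (opcode 21 == spv::OpTypeInt), so for an iterator 'it' positioned there,
// it.len() == 4 and it.opcode() == spv::OpTypeInt.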

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc. requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
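
// Usage sketch (illustrative only): because shader_module exposes begin()/end(),
// a range-based for visits each instruction in turn, and get_def() jumps straight
// to an id's definition without rescanning the stream:
//
//     void dump_pointer_types(shader_module const *module) { // hypothetical helper
//         for (auto insn : *module) {
//             if (insn.opcode() == spv::OpTypePointer) {
//                 auto pointee = module->get_def(insn.word(3)); // <id> of pointee type
//                 printf("pointer to type with opcode %u\n", pointee.opcode());
//             }
//         }
//     }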

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#define MAX_TID 513
static loader_platform_thread_id g_tidMapping[MAX_TID] = {0};
static uint32_t g_maxTID = 0;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Delete CBInfo from container and clear mem references to CB
static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(my_data, cb);
    // Delete the CBInfo info
    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
    my_data->commandBufferMap.erase(cb);
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swapchains are a special case: we use my_data->imageBindingMap, but copy in the
    // VkSwapchainCreateInfoKHR's usage flags and set the mem value to a unique key. This is used by
    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage =
            const_cast<VkSwapchainCreateInfoKHR *>(static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo))->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Add a fence, creating one if necessary to our list of fences/fenceIds
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If a fence was provided, track it and validate that it is in the UNSIGNALED state
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}
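
// Usage sketch (illustrative): a vkQueueSubmit wrapper would obtain a fenceId for
// the submission like so:
//
//     uint64_t fenceId = 0;
//     skipCall |= add_fence_info(dev_data, fence, queue, &fenceId);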

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}
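
// Worked example (illustrative): with actual = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
// VK_IMAGE_USAGE_SAMPLED_BIT and desired = VK_IMAGE_USAGE_SAMPLED_BIT, both the
// strict test ((actual & desired) == desired) and the non-strict test
// ((actual & desired) != 0) pass. With desired = VK_IMAGE_USAGE_SAMPLED_BIT |
// VK_IMAGE_USAGE_STORAGE_BIT, only the non-strict test passes: SAMPLED overlaps,
// but STORAGE is missing from the actual flags.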

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].refCount = 0;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            // Search for cmd buffer object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pMemInfo->pCommandBufferBindings.size() > 0) {
                for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin();
                     it != pMemInfo->pCommandBufferBindings.end(); ++it) {
                    if ((*it) == cb) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                pMemInfo->pCommandBufferBindings.push_front(cb);
                pMemInfo->refCount++;
            }
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                // Search for memory object in cmd buffer's reference list
                VkBool32 found = VK_FALSE;
                if (pCBNode->pMemObjList.size() > 0) {
                    for (auto it = pCBNode->pMemObjList.begin(); it != pCBNode->pMemObjList.end(); ++it) {
                        if ((*it) == mem) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    pCBNode->pMemObjList.push_front(mem);
                }
            }
        }
    }
    return skipCall;
}

// Free bindings related to CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);

    if (pCBNode) {
        if (pCBNode->pMemObjList.size() > 0) {
            list<VkDeviceMemory> mem_obj_list = pCBNode->pMemObjList;
            for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, *it);
                if (pInfo) {
                    pInfo->pCommandBufferBindings.remove(cb);
                    pInfo->refCount--;
                }
            }
            pCBNode->pMemObjList.clear();
        }
        pCBNode->activeDescriptorSets.clear();
        pCBNode->validate_functions.clear();
    }
}

// Delete the entire CB list
static void delete_cmd_buf_info_list(layer_data *my_data) {
    for (auto &cb_node : my_data->commandBufferMap) {
        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
    }
    my_data->commandBufferMap.clear();
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->pObjBindings.size();

    if (cmdBufRefCount > 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
             it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
            // TODO : CommandBuffer should be source Obj here
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pCommandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    it->handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pObjBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    bool skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            bool commandBufferComplete = false;
            assert(pInfo->object != VK_NULL_HANDLE);
            list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
            list<VkCommandBuffer>::iterator temp;
            while (pInfo->pCommandBufferBindings.size() > 0 && it != pInfo->pCommandBufferBindings.end()) {
                skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
                if (commandBufferComplete) {
                    temp = it;
                    ++temp;
                    clear_cmd_buf_and_mem_references(dev_data, *it);
                    it = temp;
                } else {
                    ++it;
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (0 != pInfo->refCount) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Removing an object binding performs 3 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object from that
            // memory object's list, decrement the memObj's refCount, and set the object's memory
            // binding pointer to NULL.
            VkBool32 clearSucceeded = VK_FALSE;
            for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
                if ((it->handle == handle) && (it->type == type)) {
                    pMemObjInfo->refCount--;
                    pMemObjInfo->pObjBindings.erase(it);
                    clearSucceeded = VK_TRUE;
                    break;
                }
            }
            if (VK_FALSE == clearSucceeded) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output an error
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
//  device is required for error logging, need a dispatchable
//  object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // A NULL mem handle is invalid for a non-sparse bind, so report an error rather than clearing the binding
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pMemInfo->pObjBindings.push_front(oht);
                    pMemInfo->refCount++;
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}
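
// Usage sketch (illustrative, not part of the layer): a vkBindBufferMemory wrapper
// would typically route through set_mem_binding like so:
//
//     skipCall |= set_mem_binding(dev_data, device, mem, (uint64_t)buffer,
//                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");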

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns skipCall: VK_TRUE if a validation error was logged, VK_FALSE otherwise
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
        }
        // non-null case so should have real mem obj
        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
        if (pInfo) {
            // Search for object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pInfo->pObjBindings.size() > 0) {
                for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                    if (((*it).handle == handle) && ((*it).type == type)) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                MT_OBJ_HANDLE_TYPE oht;
                oht.handle = handle;
                oht.type = type;
                pInfo->pObjBindings.push_front(oht);
                pInfo->refCount++;
            }
            // Set the mem binding for this object, guarding against the missing-info case reported above
            if (pObjBindInfo) {
                pObjBindInfo->mem = mem;
            }
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: %u", pInfo->refCount);
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->pObjBindings.size());
        if (pInfo->pObjBindings.size() > 0) {
            for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, it->handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->pCommandBufferBindings.size());
        if (pInfo->pCommandBufferBindings.size() > 0) {
            for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
                 it != pInfo->pCommandBufferBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", (*it));
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->pMemObjList.empty())
            continue;
        for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)(*it));
        }
    }
}

#endif

// Map actual TID to an index value and return that index
//  This keeps TIDs in the range [0, MAX_TID) and simplifies compares between runs
static uint32_t getTIDIndex() {
    loader_platform_thread_id tid = loader_platform_get_thread_id();
    for (uint32_t i = 0; i < g_maxTID; i++) {
        if (tid == g_tidMapping[i])
            return i;
    }
    // Don't yet have mapping, set it and return newly set index
    uint32_t retVal = (uint32_t)g_maxTID;
    g_tidMapping[g_maxTID++] = tid;
    assert(g_maxTID < MAX_TID);
    return retVal;
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
1202
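/* Find the OpEntryPoint matching both the given name and one of the requested stage bits. The shift
 * 1u << word(1) works because the SPIR-V ExecutionModel values for Vertex..GLCompute (0..5) coincide with
 * the bit positions of the corresponding VkShaderStageFlagBits. (A typical query would be, e.g., name
 * "main" with VK_SHADER_STAGE_VERTEX_BIT.) */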
1203static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1204    for (auto insn : *src) {
1205        if (insn.opcode() == spv::OpEntryPoint) {
1206            auto entrypointName = (char const *)&insn.word(3);
1207            auto entrypointStageBits = 1u << insn.word(1);
1208
1209            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1210                return insn;
1211            }
1212        }
1213    }
1214
1215    return src->end();
1216}
1217
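/* Sanity-check the SPIR-V header: a valid module is at least five words long (magic, version, generator,
 * id bound, reserved zero) and begins with spv::MagicNumber (0x07230203) and the expected version word. */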
1218bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
1219    uint32_t const *words = (uint32_t const *)pCreateInfo->pCode;
1220    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
1221
1222    /* Just validate that the header makes sense. */
1223    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
1224}
1225
1226static char const *storage_class_name(unsigned sc) {
1227    switch (sc) {
1228    case spv::StorageClassInput:
1229        return "input";
1230    case spv::StorageClassOutput:
1231        return "output";
1232    case spv::StorageClassUniformConstant:
1233        return "const uniform";
1234    case spv::StorageClassUniform:
1235        return "uniform";
1236    case spv::StorageClassWorkgroup:
1237        return "workgroup local";
1238    case spv::StorageClassCrossWorkgroup:
1239        return "workgroup global";
1240    case spv::StorageClassPrivate:
1241        return "private global";
1242    case spv::StorageClassFunction:
1243        return "function";
1244    case spv::StorageClassGeneric:
1245        return "generic";
1246    case spv::StorageClassAtomicCounter:
1247        return "atomic counter";
1248    case spv::StorageClassImage:
1249        return "image";
1250    case spv::StorageClassPushConstant:
1251        return "push constant";
1252    default:
1253        return "unknown";
1254    }
1255}
1256
1257/* get the value of an integral constant */
1258unsigned get_constant_value(shader_module const *src, unsigned id) {
1259    auto value = src->get_def(id);
1260    assert(value != src->end());
1261
1262    if (value.opcode() != spv::OpConstant) {
1263        /* TODO: Either ensure that the specialization transform is already performed on a module we're
1264            considering here, OR -- specialize on the fly now.
1265            */
1266        return 1;
1267    }
1268
1269    return value.word(3);
1270}
1271
1272
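/* Render a type as a human-readable string for diagnostics; e.g. a 4-wide 32-bit float vector prints as
 * "vec4 of float32", and pointers chase through to "ptr to <storage class> <pointee>". */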
1273static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1274    auto insn = src->get_def(type);
1275    assert(insn != src->end());
1276
1277    switch (insn.opcode()) {
1278    case spv::OpTypeBool:
1279        ss << "bool";
1280        break;
1281    case spv::OpTypeInt:
1282        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1283        break;
1284    case spv::OpTypeFloat:
1285        ss << "float" << insn.word(2);
1286        break;
1287    case spv::OpTypeVector:
1288        ss << "vec" << insn.word(3) << " of ";
1289        describe_type_inner(ss, src, insn.word(2));
1290        break;
1291    case spv::OpTypeMatrix:
1292        ss << "mat" << insn.word(3) << " of ";
1293        describe_type_inner(ss, src, insn.word(2));
1294        break;
1295    case spv::OpTypeArray:
1296        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1297        describe_type_inner(ss, src, insn.word(2));
1298        break;
1299    case spv::OpTypePointer:
1300        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1301        describe_type_inner(ss, src, insn.word(3));
1302        break;
1303    case spv::OpTypeStruct: {
1304        ss << "struct of (";
1305        for (unsigned i = 2; i < insn.len(); i++) {
1306            describe_type_inner(ss, src, insn.word(i));
1307            if (i < insn.len() - 1) {
1308                ss << ", ";
1309            }
1310        }
1311        /* close outside the loop so a struct with no members still prints balanced parens */
1312        ss << ")";
1313        break;
1314    }
1315    case spv::OpTypeSampler:
1316        ss << "sampler";
1317        break;
1318    case spv::OpTypeSampledImage:
1319        ss << "sampler+";
1320        describe_type_inner(ss, src, insn.word(2));
1321        break;
1322    case spv::OpTypeImage:
1323        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1324        break;
1325    default:
1326        ss << "oddtype";
1327        break;
1328    }
1329}
1330
1331
1332static std::string describe_type(shader_module const *src, unsigned type) {
1333    std::ostringstream ss;
1334    describe_type_inner(ss, src, type);
1335    return ss.str();
1336}
1337
1338
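/* Compare two types for interface matching. b_arrayed handles stages whose inputs carry an extra
 * per-vertex array level (tessellation control and geometry): when set, one level of OpTypeArray on the
 * b side is peeled off before comparing, and leaf types refuse to match until that level is consumed. */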
1339static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
1340    /* walk two type trees together, and complain about differences */
1341    auto a_insn = a->get_def(a_type);
1342    auto b_insn = b->get_def(b_type);
1343    assert(a_insn != a->end());
1344    assert(b_insn != b->end());
1345
1346    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1347        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1348        return types_match(a, b, a_type, b_insn.word(2), false);
1349    }
1350
1351    if (a_insn.opcode() != b_insn.opcode()) {
1352        return false;
1353    }
1354
1355    switch (a_insn.opcode()) {
1356    /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
1357    case spv::OpTypeBool:
1358        return !b_arrayed;
1359    case spv::OpTypeInt:
1360        /* match on width, signedness */
1361        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
1362    case spv::OpTypeFloat:
1363        /* match on width */
1364        return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
1365    case spv::OpTypeVector:
1366    case spv::OpTypeMatrix:
1367        /* match on element type, count. these all have the same layout. we don't get here if
1368         * b_arrayed -- that is handled above. */
1369        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
1370    case spv::OpTypeArray:
1371        /* match on element type, count. these all have the same layout. we don't get here if
1372         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1373         * not a literal within OpTypeArray */
1374        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
1375               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1376    case spv::OpTypeStruct:
1377        /* match on all element types */
1378        {
1379            if (b_arrayed) {
1380                /* for the purposes of matching different levels of arrayness, structs are leaves. */
1381                return false;
1382            }
1383
1384            if (a_insn.len() != b_insn.len()) {
1385                return false; /* structs cannot match if member counts differ */
1386            }
1387
1388            for (unsigned i = 2; i < a_insn.len(); i++) {
1389                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
1390                    return false;
1391                }
1392            }
1393
1394            return true;
1395        }
1396    case spv::OpTypePointer:
1397        /* match on pointee type. storage class is expected to differ */
1398        return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);
1399
1400    default:
1401        /* remaining types are OpenCL-isms, or may not appear in the interfaces we
1402         * are interested in. Just claim no match.
1403         */
1404        return false;
1405    }
1406}
1407
1408static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1409    auto it = map.find(id);
1410    if (it == map.end())
1411        return def;
1412    else
1413        return it->second;
1414}
1415
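/* Count how many interface location slots a type consumes: arrays multiply by their constant length,
 * matrices by their column count, and everything else (scalars, vectors) takes a single slot.
 * strip_array_level peels the outer per-vertex array on arrayed stages so it isn't counted. */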
1416static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1417    auto insn = src->get_def(type);
1418    assert(insn != src->end());
1419
1420    switch (insn.opcode()) {
1421    case spv::OpTypePointer:
1422        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1423         * we're never actually passing pointers around. */
1424        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1425    case spv::OpTypeArray:
1426        if (strip_array_level) {
1427            return get_locations_consumed_by_type(src, insn.word(2), false);
1428        } else {
1429            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1430        }
1431    case spv::OpTypeMatrix:
1432        /* num locations is the column count times the locations consumed by each column */
1433        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1434    default:
1435        /* everything else is just 1.
1436         * TODO: extend to handle 64bit scalar types, whose vectors may need
1437         * multiple locations. */
1438        return 1;
1439
1440    }
1441}
1442
1443typedef std::pair<unsigned, unsigned> location_t;
1444typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1445
1446struct interface_var {
1447    uint32_t id;
1448    uint32_t type_id;
1449    uint32_t offset;
1450    /* TODO: collect the name, too? Isn't required to be present. */
1451};
1452
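/* Chase through pointers (and, when is_array_of_verts, one level of per-vertex array) until reaching the
 * underlying struct; returns src->end() if the chain does not end in an OpTypeStruct. */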
1453static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1454    while (true) {
1455
1456        if (def.opcode() == spv::OpTypePointer) {
1457            def = src->get_def(def.word(3));
1458        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1459            def = src->get_def(def.word(2));
1460            is_array_of_verts = false;
1461        } else if (def.opcode() == spv::OpTypeStruct) {
1462            return def;
1463        } else {
1464            return src->end();
1465        }
1466    }
1467}
1468
1469static void collect_interface_block_members(layer_data *my_data, VkDevice dev, shader_module const *src,
1470                                            std::map<location_t, interface_var> &out,
1471                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1472                                            uint32_t id, uint32_t type_id) {
1473    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1474    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
1475    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1476        /* this isn't an interface block. */
1477        return;
1478    }
1479
1480    std::unordered_map<unsigned, unsigned> member_components;
1481
1482    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1483    for (auto insn : *src) {
1484        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1485            unsigned member_index = insn.word(2);
1486
1487            if (insn.word(3) == spv::DecorationComponent) {
1488                unsigned component = insn.word(4);
1489                member_components[member_index] = component;
1490            }
1491        }
1492    }
1493
1494    /* Second pass -- produce the output, from Location decorations */
1495    for (auto insn : *src) {
1496        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1497            unsigned member_index = insn.word(2);
1498            unsigned member_type_id = type.word(2 + member_index);
1499
1500            if (insn.word(3) == spv::DecorationLocation) {
1501                unsigned location = insn.word(4);
1502                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1503                auto component_it = member_components.find(member_index);
1504                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1505
1506                for (unsigned int offset = 0; offset < num_locations; offset++) {
1507                    interface_var v;
1508                    v.id = id;
1509                    /* TODO: member index in interface_var too? */
1510                    v.type_id = member_type_id;
1511                    v.offset = offset;
1512                    out[std::make_pair(location + offset, component)] = v;
1513                }
1514            }
1515        }
1516    }
1517}
1518
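/* Gather the entrypoint's variables in the given storage class into `out`, keyed by (location, component).
 * Variables decorated with Location are emitted directly, one entry per location they consume; variables
 * that are interface block instances are instead expanded member-by-member from OpMemberDecorate data. */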
1519static void collect_interface_by_location(layer_data *my_data, VkDevice dev, shader_module const *src, spirv_inst_iter entrypoint,
1520                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1521                                          bool is_array_of_verts) {
1522    std::unordered_map<unsigned, unsigned> var_locations;
1523    std::unordered_map<unsigned, unsigned> var_builtins;
1524    std::unordered_map<unsigned, unsigned> var_components;
1525    std::unordered_map<unsigned, unsigned> blocks;
1526
1527    for (auto insn : *src) {
1528
1529        /* We consider two interface models: SSO rendezvous-by-location, and
1530         * builtins. Complain about anything that fits neither model.
1531         */
1532        if (insn.opcode() == spv::OpDecorate) {
1533            if (insn.word(2) == spv::DecorationLocation) {
1534                var_locations[insn.word(1)] = insn.word(3);
1535            }
1536
1537            if (insn.word(2) == spv::DecorationBuiltIn) {
1538                var_builtins[insn.word(1)] = insn.word(3);
1539            }
1540
1541            if (insn.word(2) == spv::DecorationComponent) {
1542                var_components[insn.word(1)] = insn.word(3);
1543            }
1544
1545            if (insn.word(2) == spv::DecorationBlock) {
1546                blocks[insn.word(1)] = 1;
1547            }
1548        }
1549    }
1550
1551    /* TODO: handle grouped decorations */
1552    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1553     * have the same location, and we DON'T want to clobber. */
1554
1555    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1556       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1557       the word to determine which word contains the terminator. */
1558    auto word = 3;
1559    while (entrypoint.word(word) & 0xff000000u) {
1560        ++word;
1561    }
1562    ++word;
1563
1564    for (; word < entrypoint.len(); word++) {
1565        auto insn = src->get_def(entrypoint.word(word));
1566        assert(insn != src->end());
1567        assert(insn.opcode() == spv::OpVariable);
1568
1569        if (insn.word(3) == sinterface) {
1570            unsigned id = insn.word(2);
1571            unsigned type = insn.word(1);
1572
1573            int location = value_or_default(var_locations, id, -1);
1574            int builtin = value_or_default(var_builtins, id, -1);
1575            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK; treated as component 0 */
1576
1577            /* All variables and interface block members in the Input or Output storage classes
1578             * must be decorated with either a builtin or an explicit location.
1579             *
1580             * TODO: integrate the interface block support here. For now, don't complain --
1581             * a valid SPIRV module will only hit this path for the interface block case, as the
1582             * individual members of the type are decorated, rather than variable declarations.
1583             */
1584
1585            if (location != -1) {
1586                /* A user-defined interface variable, with a location. Where a variable
1587                 * occupies multiple locations, emit one result for each. */
1588                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
1589                for (unsigned int offset = 0; offset < num_locations; offset++) {
1590                    interface_var v;
1591                    v.id = id;
1592                    v.type_id = type;
1593                    v.offset = offset;
1594                    out[std::make_pair(location + offset, component)] = v;
1595                }
1596            } else if (builtin == -1) {
1597                /* An interface block instance */
1598                collect_interface_block_members(my_data, dev, src, out, blocks, is_array_of_verts, id, type);
1599            }
1600        }
1601    }
1602}
1603
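/* Gather every Uniform/UniformConstant variable reachable from the entrypoint into `out`, keyed by
 * (descriptor set, binding). An id missing a DescriptorSet or Binding decoration is treated as 0 here;
 * two variables landing on the same slot is reported as inconsistent SPIR-V. */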
1604static void collect_interface_by_descriptor_slot(layer_data *my_data, VkDevice dev, shader_module const *src,
1605                                                 std::unordered_set<uint32_t> const &accessible_ids,
1606                                                 std::map<descriptor_slot_t, interface_var> &out) {
1607
1608    std::unordered_map<unsigned, unsigned> var_sets;
1609    std::unordered_map<unsigned, unsigned> var_bindings;
1610
1611    for (auto insn : *src) {
1612        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1613         * DecorationDescriptorSet and DecorationBinding.
1614         */
1615        if (insn.opcode() == spv::OpDecorate) {
1616            if (insn.word(2) == spv::DecorationDescriptorSet) {
1617                var_sets[insn.word(1)] = insn.word(3);
1618            }
1619
1620            if (insn.word(2) == spv::DecorationBinding) {
1621                var_bindings[insn.word(1)] = insn.word(3);
1622            }
1623        }
1624    }
1625
1626    for (auto id : accessible_ids) {
1627        auto insn = src->get_def(id);
1628        assert(insn != src->end());
1629
1630        if (insn.opcode() == spv::OpVariable &&
1631            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1632            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1633            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1634
1635            auto existing_it = out.find(std::make_pair(set, binding));
1636            if (existing_it != out.end()) {
1637                /* conflict within this SPIR-V module */
1638                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1639                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1640                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1641                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1642                        existing_it->first.second);
1643            }
1644
1645            interface_var v;
1646            v.id = insn.word(2);
1647            v.type_id = insn.word(1);
1648            out[std::make_pair(set, binding)] = v;
1649        }
1650    }
1651}
1652
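/* Validate one producer/consumer stage pair. Both interfaces are collected into maps sorted by
 * (location, component) and walked in lockstep: an output nobody consumes is only a performance warning,
 * an input nobody writes is an error, and matched pairs must pass types_match(). */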
1653static bool validate_interface_between_stages(layer_data *my_data, VkDevice dev, shader_module const *producer,
1654                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
1655                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1656                                              char const *consumer_name, bool consumer_arrayed_input) {
1657    std::map<location_t, interface_var> outputs;
1658    std::map<location_t, interface_var> inputs;
1659
1660    bool pass = true;
1661
1662    collect_interface_by_location(my_data, dev, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
1663    collect_interface_by_location(my_data, dev, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
1664                                  consumer_arrayed_input);
1665
1666    auto a_it = outputs.begin();
1667    auto b_it = inputs.begin();
1668
1669    /* maps sorted by key (location); walk them together to find mismatches */
1670    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1671        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1672        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1673        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1674        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1675
1676        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1677            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
1678                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1679                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
1680                        a_first.second, consumer_name)) {
1681                pass = false;
1682            }
1683            a_it++;
1684        } else if (a_at_end || a_first > b_first) {
1685            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1686                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1687                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
1688                        producer_name)) {
1689                pass = false;
1690            }
1691            b_it++;
1692        } else {
1693            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
1694                /* OK! */
1695            } else {
1696                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1697                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1698                            a_first.first, a_first.second,
1699                            describe_type(producer, a_it->second.type_id).c_str(),
1700                            describe_type(consumer, b_it->second.type_id).c_str())) {
1701                    pass = false;
1702                }
1703            }
1704            a_it++;
1705            b_it++;
1706        }
1707    }
1708
1709    return pass;
1710}
1711
1712enum FORMAT_TYPE {
1713    FORMAT_TYPE_UNDEFINED,
1714    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1715    FORMAT_TYPE_SINT,
1716    FORMAT_TYPE_UINT,
1717};
1718
1719static unsigned get_format_type(VkFormat fmt) {
1720    switch (fmt) {
1721    case VK_FORMAT_UNDEFINED:
1722        return FORMAT_TYPE_UNDEFINED;
1723    case VK_FORMAT_R8_SINT:
1724    case VK_FORMAT_R8G8_SINT:
1725    case VK_FORMAT_R8G8B8_SINT:
1726    case VK_FORMAT_R8G8B8A8_SINT:
1727    case VK_FORMAT_R16_SINT:
1728    case VK_FORMAT_R16G16_SINT:
1729    case VK_FORMAT_R16G16B16_SINT:
1730    case VK_FORMAT_R16G16B16A16_SINT:
1731    case VK_FORMAT_R32_SINT:
1732    case VK_FORMAT_R32G32_SINT:
1733    case VK_FORMAT_R32G32B32_SINT:
1734    case VK_FORMAT_R32G32B32A32_SINT:
1735    case VK_FORMAT_B8G8R8_SINT:
1736    case VK_FORMAT_B8G8R8A8_SINT:
1737    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1738    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1739        return FORMAT_TYPE_SINT;
1740    case VK_FORMAT_R8_UINT:
1741    case VK_FORMAT_R8G8_UINT:
1742    case VK_FORMAT_R8G8B8_UINT:
1743    case VK_FORMAT_R8G8B8A8_UINT:
1744    case VK_FORMAT_R16_UINT:
1745    case VK_FORMAT_R16G16_UINT:
1746    case VK_FORMAT_R16G16B16_UINT:
1747    case VK_FORMAT_R16G16B16A16_UINT:
1748    case VK_FORMAT_R32_UINT:
1749    case VK_FORMAT_R32G32_UINT:
1750    case VK_FORMAT_R32G32B32_UINT:
1751    case VK_FORMAT_R32G32B32A32_UINT:
1752    case VK_FORMAT_B8G8R8_UINT:
1753    case VK_FORMAT_B8G8R8A8_UINT:
1754    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1755    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1756        return FORMAT_TYPE_UINT;
1757    default:
1758        return FORMAT_TYPE_FLOAT;
1759    }
1760}
1761
1762/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1763 * for comparison to a VkFormat's characterization above. */
1764static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1765    auto insn = src->get_def(type);
1766    assert(insn != src->end());
1767
1768    switch (insn.opcode()) {
1769    case spv::OpTypeInt:
1770        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1771    case spv::OpTypeFloat:
1772        return FORMAT_TYPE_FLOAT;
1773    case spv::OpTypeVector:
1774        return get_fundamental_type(src, insn.word(2));
1775    case spv::OpTypeMatrix:
1776        return get_fundamental_type(src, insn.word(2));
1777    case spv::OpTypeArray:
1778        return get_fundamental_type(src, insn.word(2));
1779    case spv::OpTypePointer:
1780        return get_fundamental_type(src, insn.word(3));
1781    default:
1782        return FORMAT_TYPE_UNDEFINED;
1783    }
1784}
1785
1786static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1787    uint32_t bit_pos = u_ffs(stage);
1788    return bit_pos - 1;
1789}
1790
1791static bool validate_vi_consistency(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi) {
1792    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1793     * each binding should be specified only once.
1794     */
1795    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1796    bool pass = true;
1797
1798    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1799        auto desc = &vi->pVertexBindingDescriptions[i];
1800        auto &binding = bindings[desc->binding];
1801        if (binding) {
1802            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1803                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1804                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1805                pass = false;
1806            }
1807        } else {
1808            binding = desc;
1809        }
1810    }
1811
1812    return pass;
1813}
1814
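/* Match the pipeline's vertex input attributes against the VS input interface using the same lockstep
 * walk as between shader stages; attribute VkFormats and shader types are each reduced to a fundamental
 * type (float/sint/uint) before being compared. */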
1815static bool validate_vi_against_vs_inputs(layer_data *my_data, VkDevice dev, VkPipelineVertexInputStateCreateInfo const *vi,
1816                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1817    std::map<location_t, interface_var> inputs;
1818    bool pass = true;
1819
1820    collect_interface_by_location(my_data, dev, vs, entrypoint, spv::StorageClassInput, inputs, false);
1821
1822    /* Build index by location */
1823    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1824    if (vi) {
1825        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
1826            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
1827    }
1828
1829    auto it_a = attribs.begin();
1830    auto it_b = inputs.begin();
1831
1832    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1833        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1834        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1835        auto a_first = a_at_end ? 0 : it_a->first;
1836        auto b_first = b_at_end ? 0 : it_b->first.first;
1837        if (!a_at_end && (b_at_end || a_first < b_first)) {
1838            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
1839                        /*dev*/ 0, __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1840                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1841                pass = false;
1842            }
1843            it_a++;
1844        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1845            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1846                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1847                        b_first)) {
1848                pass = false;
1849            }
1850            it_b++;
1851        } else {
1852            unsigned attrib_type = get_format_type(it_a->second->format);
1853            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1854
1855            /* type checking */
1856            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1857                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1858                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1859                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1860                            string_VkFormat(it_a->second->format), a_first,
1861                            describe_type(vs, it_b->second.type_id).c_str())) {
1862                    pass = false;
1863                }
1864            }
1865
1866            /* OK! */
1867            it_a++;
1868            it_b++;
1869        }
1870    }
1871
1872    return pass;
1873}
1874
1875static bool validate_fs_outputs_against_render_pass(layer_data *my_data, VkDevice dev, shader_module const *fs,
1876                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1877    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
1878    std::map<location_t, interface_var> outputs;
1879    bool pass = true;
1880
1881    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1882
1883    collect_interface_by_location(my_data, dev, fs, entrypoint, spv::StorageClassOutput, outputs, false);
1884
1885    auto it = outputs.begin();
1886    uint32_t attachment = 0;
1887
1888    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
1889     * are currently dense, but the parallel with matching between shader stages is nice.
1890     */
1891
1892    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
1893        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
1894            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1895                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1896                        "FS writes to output location %d with no matching attachment", it->first.first)) {
1897                pass = false;
1898            }
1899            it++;
1900        } else if (it == outputs.end() || it->first.first > attachment) {
1901            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1902                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
1903                pass = false;
1904            }
1905            attachment++;
1906        } else {
1907            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
1908            unsigned att_type = get_format_type(color_formats[attachment]);
1909
1910            /* type checking */
1911            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1912                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1913                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1914                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
1915                            string_VkFormat(color_formats[attachment]),
1916                            describe_type(fs, it->second.type_id).c_str())) {
1917                    pass = false;
1918                }
1919            }
1920
1921            /* OK! */
1922            it++;
1923            attachment++;
1924        }
1925    }
1926
1927    return pass;
1928}
1929
1930/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1931 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1932 * for example.
1933 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1934 *  - NOT the shader input/output interfaces.
1935 *
1936 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1937 * converting parts of this to be generated from the machine-readable spec instead.
1938 */
1939static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1940    std::unordered_set<uint32_t> worklist;
1941    worklist.insert(entrypoint.word(2));
1942
1943    while (!worklist.empty()) {
1944        auto id_iter = worklist.begin();
1945        auto id = *id_iter;
1946        worklist.erase(id_iter);
1947
1948        auto insn = src->get_def(id);
1949        if (insn == src->end()) {
1950            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1951             * across all kinds of things here that we may not care about. */
1952            continue;
1953        }
1954
1955        /* try to add to the output set */
1956        if (!ids.insert(id).second) {
1957            continue; /* if we already saw this id, we don't want to walk it again. */
1958        }
1959
1960        switch (insn.opcode()) {
1961        case spv::OpFunction:
1962            /* scan whole body of the function, enlisting anything interesting */
1963            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1964                switch (insn.opcode()) {
1965                case spv::OpLoad:
1966                case spv::OpAtomicLoad:
1967                case spv::OpAtomicExchange:
1968                case spv::OpAtomicCompareExchange:
1969                case spv::OpAtomicCompareExchangeWeak:
1970                case spv::OpAtomicIIncrement:
1971                case spv::OpAtomicIDecrement:
1972                case spv::OpAtomicIAdd:
1973                case spv::OpAtomicISub:
1974                case spv::OpAtomicSMin:
1975                case spv::OpAtomicUMin:
1976                case spv::OpAtomicSMax:
1977                case spv::OpAtomicUMax:
1978                case spv::OpAtomicAnd:
1979                case spv::OpAtomicOr:
1980                case spv::OpAtomicXor:
1981                    worklist.insert(insn.word(3)); /* ptr */
1982                    break;
1983                case spv::OpStore:
1984                case spv::OpAtomicStore:
1985                    worklist.insert(insn.word(1)); /* ptr */
1986                    break;
1987                case spv::OpAccessChain:
1988                case spv::OpInBoundsAccessChain:
1989                    worklist.insert(insn.word(3)); /* base ptr */
1990                    break;
1991                case spv::OpSampledImage:
1992                case spv::OpImageSampleImplicitLod:
1993                case spv::OpImageSampleExplicitLod:
1994                case spv::OpImageSampleDrefImplicitLod:
1995                case spv::OpImageSampleDrefExplicitLod:
1996                case spv::OpImageSampleProjImplicitLod:
1997                case spv::OpImageSampleProjExplicitLod:
1998                case spv::OpImageSampleProjDrefImplicitLod:
1999                case spv::OpImageSampleProjDrefExplicitLod:
2000                case spv::OpImageFetch:
2001                case spv::OpImageGather:
2002                case spv::OpImageDrefGather:
2003                case spv::OpImageRead:
2004                case spv::OpImage:
2005                case spv::OpImageQueryFormat:
2006                case spv::OpImageQueryOrder:
2007                case spv::OpImageQuerySizeLod:
2008                case spv::OpImageQuerySize:
2009                case spv::OpImageQueryLod:
2010                case spv::OpImageQueryLevels:
2011                case spv::OpImageQuerySamples:
2012                case spv::OpImageSparseSampleImplicitLod:
2013                case spv::OpImageSparseSampleExplicitLod:
2014                case spv::OpImageSparseSampleDrefImplicitLod:
2015                case spv::OpImageSparseSampleDrefExplicitLod:
2016                case spv::OpImageSparseSampleProjImplicitLod:
2017                case spv::OpImageSparseSampleProjExplicitLod:
2018                case spv::OpImageSparseSampleProjDrefImplicitLod:
2019                case spv::OpImageSparseSampleProjDrefExplicitLod:
2020                case spv::OpImageSparseFetch:
2021                case spv::OpImageSparseGather:
2022                case spv::OpImageSparseDrefGather:
2023                case spv::OpImageTexelPointer:
2024                    worklist.insert(insn.word(3)); /* image or sampled image */
2025                    break;
2026                case spv::OpImageWrite:
2027                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
2028                    break;
2029                case spv::OpFunctionCall:
2030                    for (auto i = 3; i < insn.len(); i++) {
2031                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2032                    }
2033                    break;
2034
2035                case spv::OpExtInst:
2036                    for (auto i = 5; i < insn.len(); i++) {
2037                        worklist.insert(insn.word(i)); /* operands to ext inst */
2038                    }
2039                    break;
2040                }
2041            }
2042            break;
2043        }
2044    }
2045}
2046
2047struct shader_stage_attributes {
2048    char const *const name;
2049    bool arrayed_input;
2050};
2051
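// Per-stage attributes, in shader stage bit order (see get_shader_stage_id); arrayed_input marks the
// stages (TCS, GS) whose per-vertex inputs carry an extra array level relative to the producer's outputs.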
2052static shader_stage_attributes shader_stage_attribs[] = {
2053    {"vertex shader", false},
2054    {"tessellation control shader", true},
2055    {"tessellation evaluation shader", false},
2056    {"geometry shader", true},
2057    {"fragment shader", false},
2058};
2059
2060static bool validate_push_constant_block_against_pipeline(layer_data *my_data, VkDevice dev,
2061                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
2062                                                          shader_module const *src, spirv_inst_iter type,
2063                                                          VkShaderStageFlagBits stage) {
2064    bool pass = true;
2065
2066    /* strip off ptrs etc */
2067    type = get_struct_type(src, type, false);
2068    assert(type != src->end());
2069
2070    /* validate directly off the offsets. this isn't quite correct for arrays
2071     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2072     * sizes */
2073    for (auto insn : *src) {
2074        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2075
2076            if (insn.word(3) == spv::DecorationOffset) {
2077                unsigned offset = insn.word(4);
2078                auto size = 4; /* bytes; TODO: calculate this based on the type */
2079
2080                bool found_range = false;
2081                for (auto const &range : *pushConstantRanges) {
2082                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2083                        found_range = true;
2084
2085                        if ((range.stageFlags & stage) == 0) {
2086                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2087                                        /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2088                                        "Push constant range covering variable starting at "
2089                                        "offset %u not accessible from stage %s",
2090                                        offset, string_VkShaderStageFlagBits(stage))) {
2091                                pass = false;
2092                            }
2093                        }
2094
2095                        break;
2096                    }
2097                }
2098
2099                if (!found_range) {
2100                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2101                                /* dev */ 0, __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2102                                "Push constant range covering variable starting at "
2103                                "offset %u not declared in layout",
2104                                offset)) {
2105                        pass = false;
2106                    }
2107                }
2108            }
2109        }
2110    }
2111
2112    return pass;
2113}
2114
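/* For every accessible variable in the PushConstant storage class, strip its pointer type down to the
 * underlying block struct and validate each decorated member offset against the pipeline layout's
 * push constant ranges. */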
2115static bool validate_push_constant_usage(layer_data *my_data, VkDevice dev,
2116                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
2117                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2118    bool pass = true;
2119
2120    for (auto id : accessible_ids) {
2121        auto def_insn = src->get_def(id);
2122        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2123            pass = validate_push_constant_block_against_pipeline(my_data, dev, pushConstantRanges, src,
2124                                                                 src->get_def(def_insn.word(1)), stage) &&
2125                   pass;
2126        }
2127    }
2128
2129    return pass;
2130}
2131
2132// For given pipelineLayout verify that the setLayout at slot.first
2133//  has the requested binding at slot.second
2134static VkDescriptorSetLayoutBinding const *get_descriptor_binding(layer_data *my_data, vector<VkDescriptorSetLayout> *pipelineLayout, descriptor_slot_t slot) {
2135
2136    if (!pipelineLayout)
2137        return nullptr;
2138
2139    if (slot.first >= pipelineLayout->size())
2140        return nullptr;
2141
2142    auto const layout_node = my_data->descriptorSetLayoutMap[(*pipelineLayout)[slot.first]];
2143    if (!layout_node) return nullptr; // guard: an unknown setLayout handle leaves a null entry in the map
2144    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
2145    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
2146        return nullptr;
2147
2148    assert(bindingIt->second < layout_node->createInfo.bindingCount);
2149    return &layout_node->createInfo.pBindings[bindingIt->second];
2150}
2151
2152// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2153
2154static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2155
2156// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2157//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2158//   to that same cmd buffer by separate thread are not changing state from underneath us
2159// Track the last cmd buffer touched by this thread
2160
2161static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2162    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2163        if (pCB->drawCount[i])
2164            return VK_TRUE;
2165    }
2166    return VK_FALSE;
2167}
2168
2169// Check object status for selected flag state
2170static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2171                                DRAW_STATE_ERROR error_code, const char *fail_msg) {
2172    if (!(pNode->status & status_mask)) {
2173        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2174                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2175                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2176    }
2177    return VK_FALSE;
2178}
2179
2180// Retrieve pipeline node ptr for given pipeline object
2181static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
2182    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
2183        return NULL;
2184    }
2185    return my_data->pipelineMap[pipeline];
2186}
2187
2188// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
2189static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2190    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2191        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2192            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2193                return VK_TRUE;
2194        }
2195    }
2196    return VK_FALSE;
2197}
2198
2199// Validate state stored as flags at time of draw call
2200static VkBool32 validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe,
2201                                          VkBool32 indexedDraw) {
2202    VkBool32 result;
2203    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2204                             "Dynamic viewport state not set for this command buffer");
2205    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2206                              "Dynamic scissor state not set for this command buffer");
2207    if ((pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2208        (pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)) {
2209        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2210                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2211    }
2212    if (pPipe->rsStateCI.depthBiasEnable) {
2213        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2214                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2215    }
2216    if (pPipe->blendConstantsEnabled) {
2217        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2218                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2219    }
2220    if (pPipe->dsStateCI.depthBoundsTestEnable) {
2221        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2222                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2223    }
2224    if (pPipe->dsStateCI.stencilTestEnable) {
2225        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2226                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2227        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2228                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2229        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2230                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2231    }
2232    if (indexedDraw) {
2233        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2234                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2235                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2236    }
2237    return result;
2238}
2239
2240// Verify attachment reference compatibility according to spec
2241//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
2242//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2243//   to make sure that format and samples counts match.
2244//  If not, they are not compatible.
2245static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2246                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2247                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2248                                             const VkAttachmentDescription *pSecondaryAttachments) {
2249    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2250        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2251            return true;
2252    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2253        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2254            return true;
2255    } else { // format and sample count must match
2256        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2257             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2258            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2259             pSecondaryAttachments[pSecondary[index].attachment].samples))
2260            return true;
2261    }
2262    // References are not compatible (used/unused mismatch, or format/sample counts differ)
2263    return false;
2264}
2265
2266// For given primary and secondary RenderPass objects, verify that they're compatible
2267static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2268                                            string &errorMsg) {
2269    stringstream errorStr;
2270    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2271        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2272        errorMsg = errorStr.str();
2273        return false;
2274    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2275        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2276        errorMsg = errorStr.str();
2277        return false;
2278    }
2279    // Trivial pass case is exact same RP
2280    if (primaryRP == secondaryRP) {
2281        return true;
2282    }
2283    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2284    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2285    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2286        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2287                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2288        errorMsg = errorStr.str();
2289        return false;
2290    }
2291    uint32_t spIndex = 0;
2292    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2293        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2294        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2295        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2296        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2297        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2298            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2299                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2300                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2301                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2302                errorMsg = errorStr.str();
2303                return false;
2304            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2305                                                         primaryColorCount, primaryRPCI->pAttachments,
2306                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2307                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2308                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2309                errorMsg = errorStr.str();
2310                return false;
2311            }
2312        }
2313        /* pDepthStencilAttachment is a single reference (possibly null), not an array of colorMax entries,
2314         * so check it once per subpass, passing an element count of 0 or 1 instead of indexing it by cIdx */
2315        uint32_t primaryDSCount = primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1u : 0u;
2316        uint32_t secondaryDSCount = secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1u : 0u;
2317        if ((primaryDSCount || secondaryDSCount) &&
2318            !attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, primaryDSCount,
2319                                              primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2320                                              secondaryDSCount, secondaryRPCI->pAttachments)) {
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }
2321        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2322        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2323        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2324        for (uint32_t i = 0; i < inputMax; ++i) {
2325            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2326                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2327                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2328                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2329                errorMsg = errorStr.str();
2330                return false;
2331            }
2332        }
2333    }
2334    return true;
2335}
2336
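// The compatibility walk above is intentionally structural: matching subpass
// counts, then matching color, resolve, depth/stencil & input attachment
// references per subpass. For example, a secondary cmdBuffer whose renderPass
// has one subpass cannot be executed inside a primary renderPass with two
// subpasses; that case is rejected by the subpassCount comparison above.
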
2337// For the given SET_NODE, verify that its Set is compatible with the setLayout corresponding to pipelineLayout[layoutIndex]
2338static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2339                                            const uint32_t layoutIndex, string &errorMsg) {
2340    stringstream errorStr;
2341    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2342    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2343        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2344        errorMsg = errorStr.str();
2345        return false;
2346    }
2347    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2348        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2349                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2350                 << ", but you're attempting to bind set to index " << layoutIndex;
2351        errorMsg = errorStr.str();
2352        return false;
2353    }
2354    // Get the specific setLayout from PipelineLayout that overlaps this set
2355    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2356    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
2357        return true;
2358    }
2359    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
2360    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
2361        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
2362                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
2363                 << " descriptors.";
2364        errorMsg = errorStr.str();
2365        return false; // trivial fail case
2366    }
2367    // Now need to check set against corresponding pipelineLayout to verify compatibility
2368    for (size_t i = 0; i < descriptorCount; ++i) {
2369        // Need to verify that layouts are identically defined
2370        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
2371        //    do we also need to check immutable samplers?
2372        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
2373            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
2374                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
2375                     << "' but corresponding descriptor from pipelineLayout is type '"
2376                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
2377            errorMsg = errorStr.str();
2378            return false;
2379        }
2380        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
2381            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
2382                     << "' but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
2383            errorMsg = errorStr.str();
2384            return false;
2385        }
2386    }
2387    return true;
2388}
2389
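// Compatibility above is checked index by index over the flattened descriptors:
// equal counts, then an equal VkDescriptorType and VkShaderStageFlags at each
// index. Illustrative example: a set whose layout declares
//     binding 0: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_VERTEX_BIT
// is rejected against a pipelineLayout slot declaring
//     binding 0: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_VERTEX_BIT
// with the "descriptor ... is type ..." message from the loop above.
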
2390// Validate that data for each specialization entry is fully contained within the buffer.
2391static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2392    VkBool32 pass = VK_TRUE;
2393
2394    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2395
2396    if (spec) {
2397        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2398            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2399                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2400                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2401                            "Specialization entry %u (for constant id %u) references memory outside provided "
2402                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2403                            " bytes provided)",
2404                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2405                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2406
2407                    pass = VK_FALSE;
2408                }
2409            }
2410        }
2411    }
2412
2413    return pass;
2414}
2415
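// Example of what validate_specialization_offsets() catches (illustrative
// application-side sketch; names are hypothetical). Four bytes of data are
// provided, but the map entry ends at byte 7:
//
//     const uint32_t spec_data = 1;
//     VkSpecializationMapEntry entry = {};
//     entry.constantID = 0;
//     entry.offset = 4;                        // starts past the provided data...
//     entry.size = 4;                          // ...and ends at byte 7
//     VkSpecializationInfo spec_info = {};
//     spec_info.mapEntryCount = 1;
//     spec_info.pMapEntries = &entry;
//     spec_info.dataSize = sizeof(spec_data);  // only 4 bytes provided
//     spec_info.pData = &spec_data;
//
// Since offset + size (8) > dataSize (4), the entry is reported via
// SHADER_CHECKER_BAD_SPECIALIZATION.
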
2416static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
2417                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2418    auto type = module->get_def(type_id);
2419
2420    descriptor_count = 1;
2421
2422    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2423     * descriptor count for each dimension. */
2424    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2425        if (type.opcode() == spv::OpTypeArray) {
2426            descriptor_count *= get_constant_value(module, type.word(3));
2427            type = module->get_def(type.word(2));
2428        }
2429        else {
2430            type = module->get_def(type.word(3));
2431        }
2432    }
2433
2434    switch (type.opcode()) {
2435    case spv::OpTypeStruct: {
2436        for (auto insn : *module) {
2437            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2438                if (insn.word(2) == spv::DecorationBlock) {
2439                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2440                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2441                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2442                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2443                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2444                }
2445            }
2446        }
2447
2448        /* Invalid */
2449        return false;
2450    }
2451
2452    case spv::OpTypeSampler:
2453        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2454
2455    case spv::OpTypeSampledImage:
2456        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2457            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2458             * doesn't really have a sampler, and a texel buffer descriptor
2459             * doesn't really provide one. Allow this slight mismatch.
2460             */
2461            auto image_type = module->get_def(type.word(2));
2462            auto dim = image_type.word(3);
2463            auto sampled = image_type.word(7);
2464            return dim == spv::DimBuffer && sampled == 1;
2465        }
2466        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2467
2468    case spv::OpTypeImage: {
2469        /* Many descriptor types can back an image type -- which one depends on the
2470         * dimension and on whether the image will be used with a sampler. SPIR-V
2471         * for Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2472         * runtime is not allowed.
2473         */
2474        auto dim = type.word(3);
2475        auto sampled = type.word(7);
2476
2477        if (dim == spv::DimSubpassData) {
2478            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2479        } else if (dim == spv::DimBuffer) {
2480            if (sampled == 1) {
2481                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2482            } else {
2483                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2484            }
2485        } else if (sampled == 1) {
2486            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2487        } else {
2488            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2489        }
2490    }
2491
2492    /* We shouldn't really see any other junk types -- but if we do, they're
2493     * a mismatch.
2494     */
2495    default:
2496        return false; /* Mismatch */
2497    }
2498}
2499
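// Rough mapping from common GLSL declarations (as typically emitted by glslang)
// to the descriptor types accepted above -- illustrative, not exhaustive:
//
//     layout(set=0, binding=0) uniform UBO { ... };       // Block        -> UNIFORM_BUFFER[_DYNAMIC]
//     layout(set=0, binding=1) buffer  SSB { ... };       // BufferBlock  -> STORAGE_BUFFER[_DYNAMIC]
//     layout(set=0, binding=2) uniform sampler2D s;       // SampledImage -> COMBINED_IMAGE_SAMPLER
//     layout(set=0, binding=3) uniform texture2D t;       // Image, sampled=1 -> SAMPLED_IMAGE
//     layout(set=0, binding=4, rgba8) uniform image2D im; // Image, sampled=2 -> STORAGE_IMAGE
//     layout(set=0, binding=5) uniform samplerBuffer b;   // Dim=Buffer   -> UNIFORM_TEXEL_BUFFER
//
// An arrayed declaration such as `uniform sampler2D s[4];` raises
// descriptor_count to 4 via the OpTypeArray stripping loop above.
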
2500static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2501    if (!feature) {
2502        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2503                    /* dev */ 0, __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2504                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2505                    "enabled on the device",
2506                    feature_name)) {
2507            return false;
2508        }
2509    }
2510
2511    return true;
2512}
2513
2514static VkBool32 validate_shader_capabilities(layer_data *my_data, VkDevice dev, shader_module const *src) {
2516    VkBool32 pass = VK_TRUE;
2517
2518    auto enabledFeatures = &my_data->physDevProperties.features;
2519
2520    for (auto insn : *src) {
2521        if (insn.opcode() == spv::OpCapability) {
2522            switch (insn.word(1)) {
2523            case spv::CapabilityMatrix:
2524            case spv::CapabilityShader:
2525            case spv::CapabilityInputAttachment:
2526            case spv::CapabilitySampled1D:
2527            case spv::CapabilityImage1D:
2528            case spv::CapabilitySampledBuffer:
2529            case spv::CapabilityImageBuffer:
2530            case spv::CapabilityImageQuery:
2531            case spv::CapabilityDerivativeControl:
2532                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2533                break;
2534
2535            case spv::CapabilityGeometry:
2536                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
2537                break;
2538
2539            case spv::CapabilityTessellation:
2540                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
2541                break;
2542
2543            case spv::CapabilityFloat64:
2544                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2545                break;
2546
2547            case spv::CapabilityInt64:
2548                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
2549                break;
2550
2551            case spv::CapabilityTessellationPointSize:
2552            case spv::CapabilityGeometryPointSize:
2553                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2554                                        "shaderTessellationAndGeometryPointSize");
2555                break;
2556
2557            case spv::CapabilityImageGatherExtended:
2558                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2559                break;
2560
2561            case spv::CapabilityStorageImageMultisample:
2562                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2563                break;
2564
2565            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2566                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2567                                        "shaderUniformBufferArrayDynamicIndexing");
2568                break;
2569
2570            case spv::CapabilitySampledImageArrayDynamicIndexing:
2571                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2572                                        "shaderSampledImageArrayDynamicIndexing");
2573                break;
2574
2575            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2576                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2577                                        "shaderStorageBufferArrayDynamicIndexing");
2578                break;
2579
2580            case spv::CapabilityStorageImageArrayDynamicIndexing:
2581                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2582                                        "shaderStorageImageArrayDynamicIndexing");
2583                break;
2584
2585            case spv::CapabilityClipDistance:
2586                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2587                break;
2588
2589            case spv::CapabilityCullDistance:
2590                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2591                break;
2592
2593            case spv::CapabilityImageCubeArray:
2594                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2595                break;
2596
2597            case spv::CapabilitySampleRateShading:
2598                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2599                break;
2600
2601            case spv::CapabilitySparseResidency:
2602                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2603                break;
2604
2605            case spv::CapabilityMinLod:
2606                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2607                break;
2608
2609            case spv::CapabilitySampledCubeArray:
2610                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2611                break;
2612
2613            case spv::CapabilityImageMSArray:
2614                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2615                break;
2616
2617            case spv::CapabilityStorageImageExtendedFormats:
2618                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
2619                                        "shaderStorageImageExtendedFormats");
2620                break;
2621
2622            case spv::CapabilityInterpolationFunction:
2623                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2624                break;
2625
2626            case spv::CapabilityStorageImageReadWithoutFormat:
2627                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2628                                        "shaderStorageImageReadWithoutFormat");
2629                break;
2630
2631            case spv::CapabilityStorageImageWriteWithoutFormat:
2632                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2633                                        "shaderStorageImageWriteWithoutFormat");
2634                break;
2635
2636            case spv::CapabilityMultiViewport:
2637                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
2638                break;
2639
2640            default:
2641                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /* dev */0,
2642                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2643                            "Shader declares capability %u, not supported in Vulkan.",
2644                            insn.word(1)))
2645                    pass = VK_FALSE;
2646                break;
2647            }
2648        }
2649    }
2650
2651    return pass;
2652}
2653
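// The feature bits consulted by require_feature() come from what the application
// enabled at device creation. Illustrative application-side sketch (handles are
// hypothetical):
//
//     VkPhysicalDeviceFeatures supported;
//     vkGetPhysicalDeviceFeatures(physical_device, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     enabled.geometryShader = supported.geometryShader; // permits OpCapability Geometry
//     VkDeviceCreateInfo device_ci = {};
//     device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     device_ci.pEnabledFeatures = &enabled;
//
// A shader declaring OpCapability Geometry on a device created without
// geometryShader is reported via SHADER_CHECKER_FEATURE_NOT_ENABLED.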
2654
2655// Validate the shaders used by the given pipeline and record the descriptor slots
2656//  they actually use into pPipeline->active_slots
2657static VkBool32 validate_and_capture_pipeline_shader_state(layer_data *my_data, const VkDevice dev, PIPELINE_NODE *pPipeline) {
2658    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
2659    /* We seem to allow pipeline stages to be specified out of order, so collect and identify them
2660     * before trying to do anything more: */
2661    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2662    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2663
2664    shader_module *shaders[5];
2665    memset(shaders, 0, sizeof(shaders));
2666    spirv_inst_iter entrypoints[5];
2667    memset(entrypoints, 0, sizeof(entrypoints));
2668    RENDER_PASS_NODE const *rp = 0;
2669    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2670    VkBool32 pass = VK_TRUE;
2671
2672    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2673        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
2674        if (pStage->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO) {
2675
2676            if ((pStage->stage & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT |
2677                                  VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) == 0) {
2678                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2679                            /*dev*/ 0, __LINE__, SHADER_CHECKER_UNKNOWN_STAGE, "SC", "Unknown shader stage %d", pStage->stage)) {
2680                    pass = VK_FALSE;
2681                }
2682            } else {
2683                pass = validate_specialization_offsets(my_data, pStage) && pass;
2684
2685                auto stage_id = get_shader_stage_id(pStage->stage);
2686                auto module = my_data->shaderModuleMap[pStage->module].get();
2687                shaders[stage_id] = module;
2688
2689                /* find the entrypoint */
2690                entrypoints[stage_id] = find_entrypoint(module, pStage->pName, pStage->stage);
2691                if (entrypoints[stage_id] == module->end()) {
2692                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2693                                /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2694                                "No entrypoint found named `%s` for stage %s", pStage->pName,
2695                                string_VkShaderStageFlagBits(pStage->stage))) {
2696                        pass = VK_FALSE;
2697                    }
2698                }
2699
2700                /* validate shader capabilities against enabled device features */
2701                pass = validate_shader_capabilities(my_data, dev, module) && pass;
2702
2703                /* mark accessible ids */
2704                std::unordered_set<uint32_t> accessible_ids;
2705                mark_accessible_ids(module, entrypoints[stage_id], accessible_ids);
2706
2707                /* validate descriptor set layout against what the entrypoint actually uses */
2708                std::map<descriptor_slot_t, interface_var> descriptor_uses;
2709                collect_interface_by_descriptor_slot(my_data, dev, module, accessible_ids, descriptor_uses);
2710
2711                auto layouts = pCreateInfo->layout != VK_NULL_HANDLE
2712                                   ? &(my_data->pipelineLayoutMap[pCreateInfo->layout].descriptorSetLayouts)
2713                                   : nullptr;
2714
2715                for (auto use : descriptor_uses) {
2716                    // While validating shaders, capture which slots are used by the pipeline
2717                    pPipeline->active_slots[use.first.first].insert(use.first.second);
2718
2719                    /* find the matching binding */
2720                    auto binding = get_descriptor_binding(my_data, layouts, use.first);
2721                    unsigned required_descriptor_count;
2722
2723                    if (!binding) {
2724                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2725                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2726                                    "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2727                                    use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2728                            pass = VK_FALSE;
2729                        }
2730                    } else if (~binding->stageFlags & pStage->stage) {
2731                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2732                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2733                                    "Shader uses descriptor slot %u.%u (used "
2734                                    "as type `%s`) but descriptor not "
2735                                    "accessible from stage %s",
2736                                    use.first.first, use.first.second,
2737                                    describe_type(module, use.second.type_id).c_str(),
2738                                    string_VkShaderStageFlagBits(pStage->stage))) {
2739                            pass = VK_FALSE;
2740                        }
2741                    } else if (!descriptor_type_match(my_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) {
2742                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2743                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2744                                    "Type mismatch on descriptor slot "
2745                                    "%u.%u (used as type `%s`) but "
2746                                    "descriptor of type %s",
2747                                    use.first.first, use.first.second,
2748                                    describe_type(module, use.second.type_id).c_str(),
2749                                    string_VkDescriptorType(binding->descriptorType))) {
2750                            pass = VK_FALSE;
2751                        }
2752                    } else if (binding->descriptorCount < required_descriptor_count) {
2753                        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2754                                    /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2755                                    "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2756                                    required_descriptor_count, use.first.first, use.first.second,
2757                                    describe_type(module, use.second.type_id).c_str(),
2758                                    binding->descriptorCount)) {
2759                            pass = VK_FALSE;
2760                        }
2761                    }
2762                }
2763
2764                /* validate push constant usage */
2765                pass =
2766                    validate_push_constant_usage(my_data, dev, &my_data->pipelineLayoutMap[pCreateInfo->layout].pushConstantRanges,
2767                                                 module, accessible_ids, pStage->stage) &&
2768                    pass;
2769            }
2770        }
2771    }
2772
2773    if (pCreateInfo->renderPass != VK_NULL_HANDLE)
2774        rp = my_data->renderPassMap[pCreateInfo->renderPass];
2775
2776    vi = pCreateInfo->pVertexInputState;
2777
2778    if (vi) {
2779        pass = validate_vi_consistency(my_data, dev, vi) && pass;
2780    }
2781
2782    if (shaders[vertex_stage]) {
2783        pass = validate_vi_against_vs_inputs(my_data, dev, vi, shaders[vertex_stage], entrypoints[vertex_stage]) && pass;
2784    }
2785
2786    /* TODO: enforce rules about which combinations of shader stages may be present */
2787    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2788    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2789
2790    while (!shaders[producer] && producer != fragment_stage) {
2791        producer++;
2792        consumer++;
2793    }
2794
2795    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2796        assert(shaders[producer]);
2797        if (shaders[consumer]) {
2798            pass = validate_interface_between_stages(my_data, dev, shaders[producer], entrypoints[producer],
2799                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
2800                                                     shader_stage_attribs[consumer].name,
2801                                                     shader_stage_attribs[consumer].arrayed_input) &&
2802                   pass;
2803
2804            producer = consumer;
2805        }
2806    }
2807
2808    if (shaders[fragment_stage] && rp) {
2809        pass = validate_fs_outputs_against_render_pass(my_data, dev, shaders[fragment_stage], entrypoints[fragment_stage], rp,
2810                                                       pCreateInfo->subpass) &&
2811               pass;
2812    }
2813
2814    return pass;
2815}
2816
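// Shape of the captured state (for reference): active_slots is keyed by
// descriptor set number, and active_slots[set] holds the binding numbers the
// shaders reference in that set. A pipeline whose shaders touch only
// set 0 / binding 2 and set 1 / binding 0 ends up with
//     active_slots = { 0 -> {2}, 1 -> {0} }
// which validate_draw_state() walks to confirm each referenced set is bound.
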
2817// Return Set node ptr for specified set or else NULL
2818static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2819    auto set_it = my_data->setMap.find(set);
2820    if (set_it == my_data->setMap.end()) {
2821        return NULL;
2822    }
    return set_it->second;
2823}
2824
2825// For given Layout Node and binding, return index where that binding begins
2826static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2827    uint32_t offsetIndex = 0;
2828    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2829        if (pLayout->createInfo.pBindings[i].binding == binding)
2830            break;
2831        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2832    }
2833    return offsetIndex;
2834}
2835
2836// For given layout node and binding, return the last flattened descriptor index covered by that binding
2837static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2838    uint32_t offsetIndex = 0;
2839    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2840        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2841        if (pLayout->createInfo.pBindings[i].binding == binding)
2842            break;
2843    }
2844    return offsetIndex - 1;
2845}
2846
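// Worked example for the two index helpers above: for a layout created with
//     pBindings = { {binding=0, descriptorCount=3}, {binding=2, descriptorCount=2} }
// the flattened descriptor indices run 0..4, so
//     getBindingStartIndex(pLayout, 2) == 3   // indices 0-2 belong to binding 0
//     getBindingEndIndex(pLayout, 2)   == 4   // last index covered by binding 2
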
2847// For the given command buffer, verify that every dynamic descriptor in each set of
2848//  activeSetBindingsPairs has a valid dynamic offset bound. To be valid, the dynamic
2849//  offset combined with the offset and range from its descriptor update must not
2850//  overflow the size of the buffer being updated.
2851static VkBool32 validate_dynamic_offsets(layer_data *my_data, const GLOBAL_CB_NODE *pCB,
2852                                         const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
2853    VkBool32 result = VK_FALSE;
2854
2855    VkWriteDescriptorSet *pWDS = NULL;
2856    uint32_t dynOffsetIndex = 0;
2857    VkDeviceSize bufferSize = 0;
2858    for (auto set_bindings_pair : activeSetBindingsPairs) {
2859        SET_NODE *set_node = set_bindings_pair.first;
2860        LAYOUT_NODE *layout_node = set_node->pLayout;
2861        for (auto binding : set_bindings_pair.second) {
2862            uint32_t startIdx = getBindingStartIndex(layout_node, binding);
2863            uint32_t endIdx = getBindingEndIndex(layout_node, binding);
2864            for (uint32_t i = startIdx; i <= endIdx; ++i) {
2865                // TODO : Flag error here if set_node->pDescriptorUpdates[i] is NULL
2866                switch (set_node->pDescriptorUpdates[i]->sType) {
2867                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
2868                    pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];
2869                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2870                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2871                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2872                            bufferSize = my_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
2873                            uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
2874                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
2875                                if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
2876                                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2877                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2878                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2879                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2880                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of "
2881                                                      "VK_WHOLE_SIZE but dynamic offset %#" PRIxLEAST32 ". "
2882                                                      "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
2883                                                      ") which has a size of %#" PRIxLEAST64 ".",
2884                                                      reinterpret_cast<const uint64_t &>(set_node->set), i,
2885                                                      pCB->dynamicOffsets[dynOffsetIndex], pWDS->pBufferInfo[j].offset,
2886                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2887                                }
2888                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
2889                                result |= log_msg(
2890                                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2891                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2892                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
2893                                    "DS",
2894                                    "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
2895                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2896                                    " from its update, this oversteps its buffer "
2897                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2898                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
2899                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2900                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2914                            }
2915                            dynOffsetIndex++;
2918                        }
                        // Advance i past the descriptors consumed by this write; the ++i at the
                        // end of the enclosing loop then steps one index past the last of them.
                        i += pWDS->descriptorCount - 1;
2919                    }
2920                    break;
2921                default: // Currently only shadowing Write update nodes so shouldn't get here
2922                    assert(0);
2923                    continue;
2924                }
2925            }
2926        }
2927    }
2928    return result;
2929}
2930
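// Numeric example of the overflow test above (illustrative): a dynamic uniform
// buffer descriptor updated with offset = 256 and range = 512 against a buffer
// of size 1024 leaves room for dynamic offsets up to 256. Binding it with
//     vkCmdBindDescriptorSets(..., 1, &set, 1, &dynamic_offset);  // dynamic_offset = 512
// gives 512 + 256 + 512 = 1280 > 1024, which is flagged as
// DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW at draw time.
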
2931// Validate overall state at the time of a draw call
2932static VkBool32 validate_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, VkBool32 indexedDraw) {
2933    PIPELINE_NODE *pPipe = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
2934    // First check flag states
2935    VkBool32 result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2936    // Now complete other state checks
2937    // TODO : Currently only performing next check if *something* was bound (non-zero last bound)
2938    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound
2939    //  We should have that check separately and then gate this check based on that check
2940    if (pPipe) {
2941        auto const &state = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS];
2942        if (state.pipelineLayout) {
2943            string errorString;
2944            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2945            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
2946            for (auto setBindingPair : pPipe->active_slots) {
2947                uint32_t setIndex = setBindingPair.first;
2948                // If valid set is not bound throw an error
2949                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2950                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2951                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2952                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
2953                                      (uint64_t)pPipe->pipeline, setIndex);
2954                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
2955                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2956                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2957                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
2958                    result |= log_msg(
2959                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2960                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2961                        "VkDescriptorSet (%#" PRIxLEAST64
2962                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
2963                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
2964                } else { // Valid set is bound and layout compatible, validate that it's updated and verify any dynamic offsets
2965                    // Pull the set node
2966                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
2967                    // Save vector of all active sets to verify dynamicOffsets below
2969                    activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
2970                    // Make sure set has been updated
2971                    if (!pSet->pUpdateStructs) {
2972                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2973                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
2974                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2975                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2976                                                              "this will result in undefined behavior.",
2977                                          (uint64_t)pSet->set);
2978                    }
2979                }
2980            }
2981            // For each dynamic descriptor, make sure dynamic offset doesn't overstep buffer
2982            if (!state.dynamicOffsets.empty())
2983                result |= validate_dynamic_offsets(my_data, pCB, activeSetBindingsPairs);
2984        }
2985        // Verify Vtx binding
2986        if (pPipe->vertexBindingDescriptions.size() > 0) {
2987            for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
2988                if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2989                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2990                                      __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2991                                      "The Pipeline State Object (%#" PRIxLEAST64
2992                                      ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
2993                                      " should be set via vkCmdBindVertexBuffers.",
2994                                      (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline, i);
2995                }
2996            }
2997        } else {
2998            if (!pCB->currentDrawData.buffers.empty()) {
2999                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
3000                                  0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3001                                  "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
3002                                  ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
3003                                  (uint64_t)pCB->commandBuffer, (uint64_t)pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
3004            }
3005        }
3006        // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
3007        // Skip check if rasterization is disabled or there is no viewport.
3008        if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
3009             !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
3010            pPipe->graphicsPipelineCI.pViewportState) {
3011            VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
3012            VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
3013            if (dynViewport) {
3014                if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
3015                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3016                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3017                                      "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
3018                                      ", but PSO viewportCount is %u. These counts must match.",
3019                                      pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
3020                }
3021            }
3022            if (dynScissor) {
3023                if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
3024                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3025                                      __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3026                                      "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
3027                                      ", but PSO scissorCount is %u. These counts must match.",
3028                                      pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
3029                }
3030            }
3031        }
3032    }
3033    return result;
3034}
3035
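// Example of the dynamic viewport/scissor count rule checked above: a PSO built
// with pViewportState->viewportCount == 2 and VK_DYNAMIC_STATE_VIEWPORT must see
// two viewports recorded before the draw, e.g. (illustrative)
//     VkViewport vps[2] = { /* ... */ };
//     vkCmdSetViewport(cmd_buffer, 0, 2, vps);
// Recording only one viewport trips DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH.
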
3036// Verify that create state for a pipeline is valid
3037static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3038                                          int pipelineIndex) {
3039    VkBool32 skipCall = VK_FALSE;
3040
3041    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3042
3043    // If create derivative bit is set, check that we've specified a base
3044    // pipeline correctly, and that the base pipeline was created to allow
3045    // derivatives.
3046    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3047        PIPELINE_NODE *pBasePipeline = nullptr;
3048        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3049              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3050            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3051                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3052                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3053        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3054            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3055                skipCall |=
3056                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3057                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3058                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3059            } else {
3060                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3061            }
3062        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3063            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3064        }
3065
3066        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3067            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3068                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3069                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3070        }
3071    }
3072
3073    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3074        if (!my_data->physDevProperties.features.independentBlend) {
3075            if (pPipeline->attachments.size() > 1) {
3076                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3077                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3078                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3079                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3080                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3081                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3082                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3083                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3084                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3085                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3086                        skipCall |=
3087                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3088                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
3089                            "enabled, all elements of pAttachments must be identical");
3090                    }
3091                }
3092            }
3093        }
3094        if (!my_data->physDevProperties.features.logicOp &&
3095            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3096            skipCall |=
3097                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3098                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3099                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3100        }
3101        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3102            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3103             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3104            skipCall |=
3105                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3106                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3107                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3108        }
3109    }
3110
3111    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3112    // produces nonsense errors that confuse users. Other layers should already
3113    // emit errors for renderpass being invalid.
3114    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
3115    if (rp_data != my_data->renderPassMap.end() &&
3116        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
3117        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3118                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3119                                                                           "is out of range for this renderpass (0..%u)",
3120                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
3121    }
3122
3123    if (!validate_and_capture_pipeline_shader_state(my_data, device, pPipeline)) {
3124        skipCall = VK_TRUE;
3125    }
3126    // VS is required
3127    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3128        skipCall |=
3129            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3130                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3131    }
3132    // Either both or neither TC/TE shaders should be defined
3133    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3134        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3135        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3136                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3137                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3138    }
3139    // Compute shaders should be specified independent of Gfx shaders
3140    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3141        (pPipeline->active_shaders &
3142         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3143          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3144        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3145                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3146                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3147    }
3148    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3149    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3150    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3151        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3152        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3153                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3154                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3155                                                                           "topology for tessellation pipelines");
3156    }
3157    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3158        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3159            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3160                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3161                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3162                                                                               "topology is only valid for tessellation pipelines");
3163        }
3164        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
3165            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3166                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3167                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3168                                                                               "topology used with patchControlPoints value %u."
3169                                                                               " patchControlPoints should be >0 and <=32.",
3170                                pPipeline->tessStateCI.patchControlPoints);
3171        }
3172    }
3173    // Viewport state must be included if rasterization is enabled.
3174    // If the viewport state is included, the viewport and scissor counts should always match.
3175    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3176    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3177        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3178        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3179            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3180                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3181                                                                           "and scissors are dynamic PSO must include "
3182                                                                           "viewportCount and scissorCount in pViewportState.");
3183        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3184                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3185            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3186                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3187                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3188                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3189        } else {
3190            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3191            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3192            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3193            if (!dynViewport) {
3194                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3195                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3196                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3197                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3198                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3199                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3200                                        "vkCmdSetViewport().",
3201                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3202                }
3203            }
3204            if (!dynScissor) {
3205                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3206                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3207                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3208                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3209                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3210                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3211                                        "vkCmdSetScissor().",
3212                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3213                }
3214            }
3215        }
3216    }
3217    return skipCall;
3218}
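
// Illustrative sketch (not part of the layer; hypothetical application values): a
// pipeline that marks viewport and scissor dynamic still has to satisfy the count
// checks above, while the pointer checks are skipped:
//
//     VkDynamicState dyn[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
//     VkPipelineDynamicStateCreateInfo dynCI = {VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO};
//     dynCI.dynamicStateCount = 2;
//     dynCI.pDynamicStates = dyn;
//     VkPipelineViewportStateCreateInfo vpCI = {VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO};
//     vpCI.viewportCount = 1; // must be non-zero and equal to scissorCount
//     vpCI.scissorCount = 1;
//     vpCI.pViewports = NULL; // allowed only because viewport is dynamic: the app
//     vpCI.pScissors = NULL;  // must later call vkCmdSetViewport()/vkCmdSetScissor()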
3219
3220// Init the pipeline mapping info based on the pipeline create info struct tree
3221//  Threading note : Calls to this function should be wrapped in a mutex
3222// TODO : this should really just be in the constructor for PIPELINE_NODE
3223static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
3224    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
3225
3226    // First init create info
3227    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
3228
3229    size_t bufferSize = 0;
3230    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
3231    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
3232
3233    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
3234        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
3235
3236        switch (pPSSCI->stage) {
3237        case VK_SHADER_STAGE_VERTEX_BIT:
3238            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3239            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
3240            break;
3241        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
3242            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3243            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3244            break;
3245        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
3246            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3247            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
3248            break;
3249        case VK_SHADER_STAGE_GEOMETRY_BIT:
3250            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3251            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
3252            break;
3253        case VK_SHADER_STAGE_FRAGMENT_BIT:
3254            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3255            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
3256            break;
3257        case VK_SHADER_STAGE_COMPUTE_BIT:
3258            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
3259            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
3260            break;
3261        default:
3262            // TODO : Flag error
3263            break;
3264        }
3265    }
3266    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
3267    if (pCreateInfo->stageCount != 0) {
3268        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
3269        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
3270        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
3271    }
3272    if (pCreateInfo->pVertexInputState != NULL) {
3273        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
3274        // Copy embedded ptrs
3275        pVICI = pCreateInfo->pVertexInputState;
3276        if (pVICI->vertexBindingDescriptionCount) {
3277            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
3278                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
3279        }
3280        if (pVICI->vertexAttributeDescriptionCount) {
3281            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
3282                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
3283        }
3284        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
3285    }
3286    if (pCreateInfo->pInputAssemblyState != NULL) {
3287        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
3288        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
3289    }
3290    if (pCreateInfo->pTessellationState != NULL) {
3291        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
3292        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
3293    }
3294    if (pCreateInfo->pViewportState != NULL) {
3295        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
3296        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
3297    }
3298    if (pCreateInfo->pRasterizationState != NULL) {
3299        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
3300        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
3301    }
3302    if (pCreateInfo->pMultisampleState != NULL) {
3303        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
3304        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
3305    }
3306    if (pCreateInfo->pDepthStencilState != NULL) {
3307        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
3308        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
3309    }
3310    if (pCreateInfo->pColorBlendState != NULL) {
3311        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
3312        // Copy embedded ptrs
3313        pCBCI = pCreateInfo->pColorBlendState;
3314        if (pCBCI->attachmentCount) {
3315            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
3316                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
3317        }
3318        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
3319    }
3320    if (pCreateInfo->pDynamicState != NULL) {
3321        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
3322        if (pPipeline->dynStateCI.dynamicStateCount) {
3323            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
3324            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
3325            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
3326        }
3327        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
3328    }
3329    return pPipeline;
3330}
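
// Why initGraphicsPipeline() deep-copies the embedded arrays above: the app's
// create info only has to remain valid for the duration of the
// vkCreateGraphicsPipelines() call, so a shallow memcpy of graphicsPipelineCI
// alone would leave the shadow copy pointing at freed memory. A hypothetical
// application-side pattern this guards against:
//
//     {
//         std::vector<VkPipelineShaderStageCreateInfo> stages = BuildStages(); // hypothetical helper
//         VkGraphicsPipelineCreateInfo ci = MakeCreateInfo(stages);            // hypothetical helper
//         vkCreateGraphicsPipelines(device, cache, 1, &ci, NULL, &pipeline);
//     } // 'stages' and 'ci' are destroyed here; the layer must not retain pointers into them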
3331
3332// Free the Pipeline nodes
3333static void deletePipelines(layer_data *my_data) {
3334    if (my_data->pipelineMap.empty())
3335        return;
3336    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
3337        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
3338            delete[](*ii).second->graphicsPipelineCI.pStages;
3339        }
3340        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
3341            delete[](*ii).second->dynStateCI.pDynamicStates;
3342        }
3343        delete (*ii).second;
3344    }
3345    my_data->pipelineMap.clear();
3346}
3347
3348// For given pipeline, return its rasterizationSamples, or VK_SAMPLE_COUNT_1_BIT if no multisample state is present
3349static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
3350    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
3351    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
3352        return pPipe->msStateCI.rasterizationSamples;
3353    }
3354    return VK_SAMPLE_COUNT_1_BIT;
3355}
3356
3357// Validate state related to the PSO
3358static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
3359                                      const VkPipeline pipeline) {
3360    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
3361        // Verify that any MSAA request in PSO matches sample# in bound FB
3362        // Skip the check if rasterization is disabled.
3363        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3364        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3365            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3366            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3367            if (pCB->activeRenderPass) {
3368                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3369                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3370                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3371                uint32_t i;
3372
3373                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3374                    VkSampleCountFlagBits samples;
3375
3376                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3377                        continue;
3378
3379                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3380                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3381                        subpassNumSamples = samples;
3382                    } else if (subpassNumSamples != samples) {
3383                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3384                        break;
3385                    }
3386                }
3387                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3388                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3389                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3390                        subpassNumSamples = samples;
3391                    else if (subpassNumSamples != samples)
3392                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3393                }
3394
3395                if (psoNumSamples != subpassNumSamples) {
3396                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3397                                   (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3398                                   "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
3399                                   ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") uses %u samples!",
3400                                   (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3401                }
3402            } else {
3403                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3404                //   Verify and flag error as appropriate
3405            }
3406        }
3407        // TODO : Add more checks here
3408    } else {
3409        // TODO : Validate non-gfx pipeline updates
3410    }
3411    return VK_FALSE;
3412}
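
// Illustrative mismatch the check above reports (hypothetical values): binding a
// PSO whose pMultisampleState->rasterizationSamples is VK_SAMPLE_COUNT_4_BIT
// inside a subpass whose color attachment was created with
// VkAttachmentDescription::samples = VK_SAMPLE_COUNT_1_BIT produces
// DRAWSTATE_NUM_SAMPLES_MISMATCH (4 vs. 1).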
3413
3414// The code below is specifically for managing/tracking descriptor sets (DSs)
3415
3416// Return Pool node ptr for specified pool or else NULL
3417static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3418    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3419        return NULL;
3420    }
3421    return my_data->descriptorPoolMap[pool];
3422}
3423
3424static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
3425    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
3426        return NULL;
3427    }
3428    return my_data->descriptorSetLayoutMap[layout];
3429}
3430
3431// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
3432static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3433    switch (pUpdateStruct->sType) {
3434    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3435    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3436        return VK_FALSE;
3437    default:
3438        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3439                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3440                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3441                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3442    }
3443}
3444
3445// Return the descriptor count for the given update struct,
3446// or 0 if the struct type is not a recognized update type
3447static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3448    switch (pUpdateStruct->sType) {
3449    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3450        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3451    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3452        // TODO : Need to understand this case better and make sure code is correct
3453        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3454    default:
3455        return 0;
3456    }
3458}
3459
3460// For given layout and update, return the first overall index of the layout that is updated
3461static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3462                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3463    return getBindingStartIndex(pLayout, binding) + arrayIndex;
3464}
3465
3466// For given layout and update, return the last overall index of the layout that is updated
3467static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3468                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3469    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3470    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
3471}
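
// Worked example of the index math above (assuming getBindingStartIndex() returns
// the binding's cumulative descriptor offset within the layout): for a layout with
// binding 0 holding 2 descriptors and binding 1 holding 4, an update to binding 1
// at arrayIndex 1 with descriptorCount 2 spans overall indices
//     start = 2 + 1         = 3
//     end   = 2 + 1 + 2 - 1 = 4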
3472
3473// Verify that the descriptor type in the update struct matches what's expected by the layout
3474static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
3475                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3476    // First get actual type of update
3477    VkBool32 skipCall = VK_FALSE;
3478    VkDescriptorType actualType;
3479    uint32_t i = 0;
3480    switch (pUpdateStruct->sType) {
3481    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3482        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3483        break;
3484    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3485        /* no need to validate */
3486        return VK_FALSE;
3488    default:
3489        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3490                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3491                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3492                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3493    }
3494    if (VK_FALSE == skipCall) {
3495        // Set first stageFlags as reference and verify that all other updates match it
3496        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
3497        for (i = startIndex; i <= endIndex; i++) {
3498            if (pLayout->descriptorTypes[i] != actualType) {
3499                skipCall |= log_msg(
3500                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3501                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3502                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3503                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
3504            }
3505            if (pLayout->stageFlags[i] != refStageFlags) {
3506                skipCall |= log_msg(
3507                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3508                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
3509                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
3510                    refStageFlags, pLayout->stageFlags[i]);
3511            }
3512        }
3513    }
3514    return skipCall;
3515}
3516
3517// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3518//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
3519// NOTE : Calls to this function should be wrapped in mutex
3520static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3521    VkBool32 skipCall = VK_FALSE;
3522    VkWriteDescriptorSet *pWDS = NULL;
3523    VkCopyDescriptorSet *pCDS = NULL;
3524    switch (pUpdate->sType) {
3525    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3526        pWDS = new VkWriteDescriptorSet;
3527        *pNewNode = (GENERIC_HEADER *)pWDS;
3528        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3529
3530        switch (pWDS->descriptorType) {
3531        case VK_DESCRIPTOR_TYPE_SAMPLER:
3532        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3533        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3534        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3535            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3536            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3537            pWDS->pImageInfo = info;
3538        } break;
3539        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3540        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3541            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3542            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3543            pWDS->pTexelBufferView = info;
3544        } break;
3545        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3546        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3547        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3548        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3549            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3550            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3551            pWDS->pBufferInfo = info;
3552        } break;
3553        default:
3554            return VK_TRUE; // validation error; this function returns VkBool32, not VkResult
3556        }
3557        break;
3558    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3559        pCDS = new VkCopyDescriptorSet;
3560        *pNewNode = (GENERIC_HEADER *)pCDS;
3561        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3562        break;
3563    default:
3564        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3565                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3566                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3567                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3568            return VK_TRUE;
        // Unknown sType: no shadow node was allocated, so return before the pNext
        // fixup below dereferences the NULL *pNewNode.
        return skipCall;
3569    }
3570    // Make sure that pNext for the end of shadow copy is NULL
3571    (*pNewNode)->pNext = NULL;
3572    return skipCall;
3573}
3574
3575// Verify that given sampler is valid
3576static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
3577    VkBool32 skipCall = VK_FALSE;
3578    auto sampIt = my_data->sampleMap.find(*pSampler);
3579    if (sampIt == my_data->sampleMap.end()) {
3580        if (!immutable) {
3581            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3582                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3583                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3584                                (uint64_t)*pSampler);
3585        } else { // immutable
3586            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3587                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3588                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3589                                "sampler %#" PRIxLEAST64,
3590                                (uint64_t)*pSampler);
3591        }
3592    } else {
3593        // TODO : Any further checks we want to do on the sampler?
3594    }
3595    return skipCall;
3596}
3597
3598// Find layout(s) at the command buffer level
3599bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3600    ImageSubresourcePair imgpair = {image, true, range};
3601    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3602    if (imgsubIt == pCB->imageLayoutMap.end()) {
3603        imgpair = {image, false, VkImageSubresource()};
3604        imgsubIt = pCB->imageLayoutMap.find(imgpair);
3605        if (imgsubIt == pCB->imageLayoutMap.end())
3606            return false;
3607    }
3608    node = imgsubIt->second;
3609    return true;
3610}
3611
3612// Find layout(s) at the global (device) level
3613bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3614    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3615    if (imgsubIt == my_data->imageLayoutMap.end()) {
3616        imgpair = {imgpair.image, false, VkImageSubresource()};
3617        imgsubIt = my_data->imageLayoutMap.find(imgpair);
3618        if (imgsubIt == my_data->imageLayoutMap.end())
3619            return false;
3620    }
3621    layout = imgsubIt->second.layout;
3622    return true;
3623}
3624
3625bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3626    ImageSubresourcePair imgpair = {image, true, range};
3627    return FindLayout(my_data, imgpair, layout);
3628}
3629
3630bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3631    auto sub_data = my_data->imageSubresourceMap.find(image);
3632    if (sub_data == my_data->imageSubresourceMap.end())
3633        return false;
3634    auto imgIt = my_data->imageMap.find(image);
3635    if (imgIt == my_data->imageMap.end())
3636        return false;
3637    bool ignoreGlobal = false;
3638    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
3639    // potential errors in this case.
3640    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3641        ignoreGlobal = true;
3642    }
3643    for (auto imgsubpair : sub_data->second) {
3644        if (ignoreGlobal && !imgsubpair.hasSubresource)
3645            continue;
3646        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3647        if (img_data != my_data->imageLayoutMap.end()) {
3648            layouts.push_back(img_data->second.layout);
3649        }
3650    }
3651    return true;
3652}
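
// Example of the ignoreGlobal heuristic above (hypothetical image): with
// arrayLayers = 2 and mipLevels = 3 there are 6 addressable subresources, so once
// imageSubresourceMap holds 7+ entries for the image (every subresource plus the
// whole-image entry), the per-subresource layouts fully cover the image and the
// stale whole-image entry is skipped.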
3653
3654// Set the layout on the global level
3655void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3656    VkImage &image = imgpair.image;
3657    // TODO (mlentine): Maybe set format if new? Not used atm.
3658    my_data->imageLayoutMap[imgpair].layout = layout;
3659    // TODO (mlentine): Maybe make vector a set?
3660    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3661    if (subresource == my_data->imageSubresourceMap[image].end()) {
3662        my_data->imageSubresourceMap[image].push_back(imgpair);
3663    }
3664}
3665
3666// Set the layout on the cmdbuf level
3667void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3668    pCB->imageLayoutMap[imgpair] = node;
3669    // TODO (mlentine): Maybe make vector a set?
3670    auto subresource =
3671        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3672    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3673        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3674    }
3675}
3676
3677void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3678    // TODO (mlentine): Maybe make vector a set?
3679    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3680        pCB->imageSubresourceMap[imgpair.image].end()) {
3681        pCB->imageLayoutMap[imgpair].layout = layout;
3682    } else {
3683        // TODO (mlentine): Could be expensive and might need to be removed.
3684        assert(imgpair.hasSubresource);
3685        IMAGE_CMD_BUF_LAYOUT_NODE node;
3686        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3687            node.initialLayout = layout;
3688        }
3689        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3690    }
3691}
3692
3693template <class OBJECT, class LAYOUT>
3694void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3695    if (imgpair.subresource.aspectMask & aspectMask) {
3696        imgpair.subresource.aspectMask = aspectMask;
3697        SetLayout(pObject, imgpair, layout);
3698    }
3699}
3700
3701template <class OBJECT, class LAYOUT>
3702void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3703    ImageSubresourcePair imgpair = {image, true, range};
3704    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3705    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3706    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3707    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3708}
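
// Example of the per-aspect fan-out above (hypothetical barrier): a transition
// whose subresource has aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT |
// VK_IMAGE_ASPECT_STENCIL_BIT is recorded as two separate map entries, one keyed
// on DEPTH alone and one on STENCIL alone, because each call narrows
// imgpair.subresource.aspectMask to a single aspect before forwarding.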
3709
3710template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3711    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3712    SetLayout(pObject, imgpair, layout); // whole-image entry: imgpair.hasSubresource is false
3713}
3714
3715void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3716    auto image_view_data = dev_data->imageViewMap.find(imageView);
3717    assert(image_view_data != dev_data->imageViewMap.end());
3718    const VkImage &image = image_view_data->second.image;
3719    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3720    // TODO: Do not iterate over every possibility - consolidate where possible
3721    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3722        uint32_t level = subRange.baseMipLevel + j;
3723        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3724            uint32_t layer = subRange.baseArrayLayer + k;
3725            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3726            SetLayout(pCB, image, sub, layout);
3727        }
3728    }
3729}
3730
3731// Verify that given imageView is valid
3732static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3733    VkBool32 skipCall = VK_FALSE;
3734    auto ivIt = my_data->imageViewMap.find(*pImageView);
3735    if (ivIt == my_data->imageViewMap.end()) {
3736        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3737                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3738                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3739                            (uint64_t)*pImageView);
3740    } else {
3741        // Validate that imageLayout is compatible with aspectMask and image format
3742        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3743        VkImage image = ivIt->second.image;
3744        // TODO : Check here in case we have a bad image
3745        VkFormat format = VK_FORMAT_MAX_ENUM;
3746        auto imgIt = my_data->imageMap.find(image);
3747        if (imgIt != my_data->imageMap.end()) {
3748            format = (*imgIt).second.createInfo.format;
3749        } else {
3750            // Also need to check the swapchains.
3751            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3752            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3753                VkSwapchainKHR swapchain = swapchainIt->second;
3754                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3755                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3756                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3757                    format = pswapchain_node->createInfo.imageFormat;
3758                }
3759            }
3760        }
3761        if (format == VK_FORMAT_MAX_ENUM) {
3762            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3763                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3764                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3765                                " in imageView %#" PRIxLEAST64,
3766                                (uint64_t)image, (uint64_t)*pImageView);
3767        } else {
3768            VkBool32 ds = vk_format_is_depth_or_stencil(format);
3769            switch (imageLayout) {
3770            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3771                // Only Color bit must be set
3772                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3773                    skipCall |=
3774                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3775                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3776                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3777                                "and imageView %#" PRIxLEAST64 ""
3778                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3779                                (uint64_t)*pImageView);
3780                }
3781                // format must NOT be DS
3782                if (ds) {
3783                    skipCall |=
3784                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3785                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3786                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3787                                "and imageView %#" PRIxLEAST64 ""
3788                                " but the image format is %s which is not a color format.",
3789                                (uint64_t)*pImageView, string_VkFormat(format));
3790                }
3791                break;
3792            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3793            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3794                // Depth or stencil bit must be set, but both must NOT be set
3795                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3796                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3797                        // both  must NOT be set
3798                        skipCall |=
3799                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3800                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3801                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3802                                    " that has both STENCIL and DEPTH aspects set",
3803                                    (uint64_t)*pImageView);
3804                    }
3805                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3806                    // Neither were set
3807                    skipCall |=
3808                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3809                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3810                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3811                                " that does not have STENCIL or DEPTH aspect set.",
3812                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3813                }
3814                // format must be DS
3815                if (!ds) {
3816                    skipCall |=
3817                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3818                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3819                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3820                                " but the image format is %s which is not a depth/stencil format.",
3821                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3822                }
3823                break;
3824            default:
3825                // anything to check for other layouts?
3826                break;
3827            }
3828        }
3829    }
3830    return skipCall;
3831}
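
// Illustrative violation of the checks above (hypothetical values): writing a
// descriptor with imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL through
// an imageView whose subresourceRange.aspectMask is VK_IMAGE_ASPECT_DEPTH_BIT
// over a VK_FORMAT_D32_SFLOAT image trips both DRAWSTATE_INVALID_IMAGE_ASPECT
// (COLOR bit missing) and DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR (depth format
// used with a color layout).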
3832
3833// Verify that given bufferView is valid
3834static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3835    VkBool32 skipCall = VK_FALSE;
3836    auto sampIt = my_data->bufferViewMap.find(*pBufferView);
3837    if (sampIt == my_data->bufferViewMap.end()) {
3838        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3839                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3840                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3841                            (uint64_t)*pBufferView);
3842    } else {
3843        // TODO : Any further checks we want to do on the bufferView?
3844    }
3845    return skipCall;
3846}
3847
3848// Verify that given bufferInfo is valid
3849static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3850    VkBool32 skipCall = VK_FALSE;
3851    auto sampIt = my_data->bufferMap.find(pBufferInfo->buffer);
3852    if (sampIt == my_data->bufferMap.end()) {
3853        skipCall |=
3854            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3855                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3856                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3857                    (uint64_t)pBufferInfo->buffer);
3858    } else {
3859        // TODO : Any further checks we want to do on the buffer?
3860    }
3861    return skipCall;
3862}
3863
3864static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3865                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
3866    VkBool32 skipCall = VK_FALSE;
3867    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3868    const VkSampler *pSampler = NULL;
3869    VkBool32 immutable = VK_FALSE;
3870    uint32_t i = 0;
3871    // For given update type, verify that update contents are correct
3872    switch (pWDS->descriptorType) {
3873    case VK_DESCRIPTOR_TYPE_SAMPLER:
3874        for (i = 0; i < pWDS->descriptorCount; ++i) {
3875            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3876        }
3877        break;
3878    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3879        for (i = 0; i < pWDS->descriptorCount; ++i) {
3880            if (NULL == pLayoutBinding->pImmutableSamplers) {
3881                pSampler = &(pWDS->pImageInfo[i].sampler);
3882                if (immutable) {
3883                    skipCall |= log_msg(
3884                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3885                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3886                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3887                        ", but previous update(s) from this "
3888                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3889                        "use immutable or non-immutable samplers.",
3890                        i, (uint64_t)*pSampler);
3891                }
3892            } else {
3893                if (i > 0 && !immutable) {
3894                    skipCall |= log_msg(
3895                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3896                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3897                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3898                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3899                        "use immutable or non-immutable samplers.",
3900                        i);
3901                }
3902                immutable = VK_TRUE;
3903                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
3904            }
3905            skipCall |= validateSampler(my_data, pSampler, immutable);
3906        }
3907    // Intentionally fall through here to also validate the image info
3908    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3909    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3910    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3911        for (i = 0; i < pWDS->descriptorCount; ++i) {
3912            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3913        }
3914        break;
3915    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3916    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3917        for (i = 0; i < pWDS->descriptorCount; ++i) {
3918            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3919        }
3920        break;
3921    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3922    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3923    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3924    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3925        for (i = 0; i < pWDS->descriptorCount; ++i) {
3926            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3927        }
3928        break;
3929    default:
3930        break;
3931    }
3932    return skipCall;
3933}
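
// Note on the immutable-sampler branch above: a binding created with
// pImmutableSamplers != NULL takes the immutable path, e.g. (hypothetical setup):
//
//     VkSampler immutableSampler = /* created at layout-build time */;
//     VkDescriptorSetLayoutBinding binding = {};
//     binding.binding = 0;
//     binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//     binding.descriptorCount = 1;
//     binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
//     binding.pImmutableSamplers = &immutableSampler; // updates validate this handle,
//                                                     // not pImageInfo[i].sampler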
3934// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3935// func_str is the name of the calling function
3936// Return VK_FALSE if no errors occur
3937// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3938VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, const std::string &func_str) {
3939    VkBool32 skip_call = VK_FALSE;
3940    auto set_node = my_data->setMap.find(set);
3941    if (set_node == my_data->setMap.end()) {
3942        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3943                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3944                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3945                             (uint64_t)(set));
3946    } else {
3947        if (set_node->second->in_use.load()) {
3948            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3949                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3950                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3951                                 func_str.c_str(), (uint64_t)(set));
3952        }
3953    }
3954    return skip_call;
3955}
3956static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3957    // Flag any CBs this set is bound to as INVALID
3958    for (auto cb : pSet->boundCmdBuffers) {
3959        auto cb_node = dev_data->commandBufferMap.find(cb);
3960        if (cb_node != dev_data->commandBufferMap.end()) {
3961            cb_node->second->state = CB_INVALID;
3962        }
3963    }
3964}
3965// update DS mappings based on write and copy update arrays
3966static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3967                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3968    VkBool32 skipCall = VK_FALSE;
3969
3970    LAYOUT_NODE *pLayout = NULL;
3971    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
3972    // Validate Write updates
3973    uint32_t i = 0;
3974    for (i = 0; i < descriptorWriteCount; i++) {
3975        VkDescriptorSet ds = pWDS[i].dstSet;
3976        SET_NODE *pSet = my_data->setMap[ds];
3977        // Set being updated cannot be in-flight
3978        VkBool32 idle_skip = validateIdleDescriptorSet(my_data, ds, "VkUpdateDescriptorSets");
        skipCall |= idle_skip; // accumulate rather than overwrite earlier errors
        if (VK_TRUE == idle_skip)
3979            return skipCall;
3980        // If set is bound to any cmdBuffers, mark them invalid
3981        invalidateBoundCmdBuffers(my_data, pSet);
3982        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3983        pLayout = pSet->pLayout;
3984        // First verify valid update struct
3985        VkBool32 struct_skip = validUpdateStruct(my_data, device, pUpdate);
        skipCall |= struct_skip; // accumulate rather than overwrite earlier errors
        if (VK_TRUE == struct_skip) {
3986            break;
3987        }
3988        uint32_t binding = 0, endIndex = 0;
3989        binding = pWDS[i].dstBinding;
3990        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
3991        // Make sure that layout being updated has the binding being updated
3992        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
3993            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3994                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3995                                "Descriptor Set %" PRIu64 " does not have a binding that matches "
3996                                "update binding %u for update type %s!",
3998                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
3999        } else {
4000            // Next verify that update falls within size of given binding
4001            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4002            if (getBindingEndIndex(pLayout, binding) < endIndex) {
4003                pLayoutCI = &pLayout->createInfo;
4004                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4005                skipCall |=
4006                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4007                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4008                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4009                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
4010            } else { // TODO : should we skip update on a type mismatch or force it?
4011                uint32_t startIndex;
4012                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4013                // Layout bindings match w/ update, now verify that update type
4014                // & stageFlags are the same for entire update
4015                VkBool32 consistency_skip = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex);
                skipCall |= consistency_skip; // accumulate rather than overwrite earlier errors
                if (VK_FALSE == consistency_skip) {
4016                    // The update is within bounds and consistent, but we still need to
4017                    // make sure the contents make sense as well
                    VkBool32 contents_skip =
                        validateUpdateContents(my_data, &pWDS[i], &pLayout->createInfo.pBindings[bindingToIndex->second]);
                    skipCall |= contents_skip;
                    if (VK_FALSE == contents_skip) {
4020                        // Update is good. Save the update info
4021                        // Create new update struct for this set's shadow copy
4022                        GENERIC_HEADER *pNewNode = NULL;
4023                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
4024                        if (NULL == pNewNode) {
4025                            skipCall |= log_msg(
4026                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4027                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4028                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
4029                        } else {
4030                            // Insert shadow node into LL of updates for this set
4031                            pNewNode->pNext = pSet->pUpdateStructs;
4032                            pSet->pUpdateStructs = pNewNode;
4033                            // Now update appropriate descriptor(s) to point to new Update node
4034                            for (uint32_t j = startIndex; j <= endIndex; j++) {
4035                                assert(j < pSet->descriptorCount);
4036                                pSet->pDescriptorUpdates[j] = pNewNode;
4037                            }
4038                        }
4039                    }
4040                }
4041            }
4042        }
4043    }
4044    // Now validate copy updates
4045    for (i = 0; i < descriptorCopyCount; ++i) {
4046        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4047        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4048        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4049        // For each copy make sure that update falls within given layout and that types match
4050        pSrcSet = my_data->setMap[pCDS[i].srcSet];
4051        pDstSet = my_data->setMap[pCDS[i].dstSet];
4052        // Set being updated cannot be in-flight
4053        VkBool32 idle_skip = validateIdleDescriptorSet(my_data, pDstSet->set, "VkUpdateDescriptorSets");
        skipCall |= idle_skip;
        if (VK_TRUE == idle_skip)
4054            return skipCall;
4055        invalidateBoundCmdBuffers(my_data, pDstSet);
4056        pSrcLayout = pSrcSet->pLayout;
4057        pDstLayout = pDstSet->pLayout;
4058        // Validate that src binding is valid for src set layout
4059        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4060            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4061                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4062                                "Copy descriptor update %u has srcBinding %u "
4063                                "which is out of bounds for underlying SetLayout "
4064                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4065                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
4066        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
4067            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4068                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4069                                "Copy descriptor update %u has dstBinding %u "
4070                                "which is out of bounds for underlying SetLayout "
4071                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4072                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
4073        } else {
4074            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
4075            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4076                                            (const GENERIC_HEADER *)&(pCDS[i]));
4077            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4078                                            (const GENERIC_HEADER *)&(pCDS[i]));
4079            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
4080                pLayoutCI = &pSrcLayout->createInfo;
4081                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4082                skipCall |=
4083                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4084                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4085                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4086                            pCDS[i].srcBinding, DSstr.c_str());
4087            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
4088                pLayoutCI = &pDstLayout->createInfo;
4089                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4090                skipCall |=
4091                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4092                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4093                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4094                            pCDS[i].dstBinding, DSstr.c_str());
4095            } else {
4096                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4097                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4098                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4099                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4100                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
4101                    // For copy just make sure that the types match and then perform the update
4102                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
4103                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4104                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
4105                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
4106                                            "that does not match overlapping dest descriptor type of %s!",
4107                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
4108                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
4109                    } else {
4110                        // point dst descriptor at corresponding src descriptor
4111                        // TODO : This may be a hole. I believe copy should be its own copy,
4112                        //  otherwise a subsequent write update to src will incorrectly affect the copy
4113                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
4114                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
4115                    }
4116                }
4117            }
4118        }
4119    }
4120    return skipCall;
4121}
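
// Hypothetical write update exercising the validation path above: struct type,
// binding existence, bounds, type/stageFlags consistency, then contents:
//
//     VkDescriptorBufferInfo info = {uniformBuffer, 0, VK_WHOLE_SIZE}; // 'uniformBuffer' assumed valid
//     VkWriteDescriptorSet w = {};
//     w.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     w.dstSet = set;          // must not be in flight (validateIdleDescriptorSet)
//     w.dstBinding = 0;        // must exist in the set's layout
//     w.dstArrayElement = 0;   // start index within the binding
//     w.descriptorCount = 1;   // start + count must stay within the binding
//     w.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; // must match the layout
//     w.pBufferInfo = &info;
//     vkUpdateDescriptorSets(device, 1, &w, 0, NULL);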
4122
4123// Verify that given pool has descriptors that are being requested for allocation.
4124// NOTE : Calls to this function should be wrapped in mutex
4125static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
4126                                                         const VkDescriptorSetLayout *pSetLayouts) {
4127    VkBool32 skipCall = VK_FALSE;
4128    uint32_t i = 0;
4129    uint32_t j = 0;
4130
4131    // Track number of descriptorSets allowable in this pool
4132    if (pPoolNode->availableSets < count) {
4133        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4134                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4135                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
4136                            ". This pool only has %u descriptorSets remaining.",
4137                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
4138    } else {
4139        pPoolNode->availableSets -= count;
4140    }
4141
4142    for (i = 0; i < count; ++i) {
4143        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4144        if (NULL == pLayout) {
4145            skipCall |=
4146                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4147                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4148                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4149                        (uint64_t)pSetLayouts[i]);
4150        } else {
4151            uint32_t typeIndex = 0, poolSizeCount = 0;
4152            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4153                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4154                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4155                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4156                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4157                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4158                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4159                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
                                        ". This pool only has %u descriptors of this type remaining.",
4161                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4162                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4163                } else { // Decrement available descriptors of this type
4164                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4165                }
4166            }
4167        }
4168    }
4169    return skipCall;
4170}
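
// Illustrative call site (a sketch, not code from this layer): vkAllocateDescriptorSets
// would perform this check under the global lock before allocating, e.g.:
//     loader_platform_thread_lock_mutex(&globalLock);
//     skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode,
//                                                          pAllocateInfo->descriptorSetCount,
//                                                          pAllocateInfo->pSetLayouts);
//     loader_platform_thread_unlock_mutex(&globalLock);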
4171
4172// Free the shadowed update node for this Set
4173// NOTE : Calls to this function should be wrapped in mutex
4174static void freeShadowUpdateTree(SET_NODE *pSet) {
4175    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4176    pSet->pUpdateStructs = NULL;
4177    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4178    // Clear the descriptor mappings as they will now be invalid
4179    pSet->pDescriptorUpdates.clear();
4180    while (pShadowUpdate) {
4181        pFreeUpdate = pShadowUpdate;
4182        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4183        VkWriteDescriptorSet *pWDS = NULL;
4184        switch (pFreeUpdate->sType) {
4185        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4186            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4187            switch (pWDS->descriptorType) {
4188            case VK_DESCRIPTOR_TYPE_SAMPLER:
4189            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4190            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4191            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4192                delete[] pWDS->pImageInfo;
4193            } break;
4194            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4195            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4196                delete[] pWDS->pTexelBufferView;
4197            } break;
4198            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4199            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4200            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4201            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4202                delete[] pWDS->pBufferInfo;
4203            } break;
4204            default:
4205                break;
4206            }
4207            break;
4208        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4209            break;
4210        default:
4211            assert(0);
4212            break;
4213        }
4214        delete pFreeUpdate;
4215    }
4216}
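
// The shadow tree freed above is a singly-linked list of copied update structs,
// each beginning with a GENERIC_HEADER { sType, pNext }. Conceptually:
//     pSet->pUpdateStructs -> VkWriteDescriptorSet -> VkCopyDescriptorSet -> ... -> NULL
// Only the payload arrays (pImageInfo/pTexelBufferView/pBufferInfo) owned by the
// write-update shadow copies are deleted per-type; copy-update nodes own no arrays,
// so only the node itself is deleted.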
4217
4218// Free all DS Pools including their Sets & related sub-structs
4219// NOTE : Calls to this function should be wrapped in mutex
4220static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
4223    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4224        SET_NODE *pSet = (*ii).second->pSets;
4225        SET_NODE *pFreeSet = pSet;
4226        while (pSet) {
4227            pFreeSet = pSet;
4228            pSet = pSet->pNext;
4229            // Freeing layouts handled in deleteLayouts() function
4230            // Free Update shadow struct tree
4231            freeShadowUpdateTree(pFreeSet);
4232            delete pFreeSet;
4233        }
4234        delete (*ii).second;
4235    }
4236    my_data->descriptorPoolMap.clear();
4237}
4238
4239// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
4240// NOTE : Calls to this function should be wrapped in mutex
4241static void deleteLayouts(layer_data *my_data) {
    if (my_data->descriptorSetLayoutMap.empty())
        return;
4244    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4245        LAYOUT_NODE *pLayout = (*ii).second;
4246        if (pLayout->createInfo.pBindings) {
4247            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4248                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4249            }
4250            delete[] pLayout->createInfo.pBindings;
4251        }
4252        delete pLayout;
4253    }
4254    my_data->descriptorSetLayoutMap.clear();
4255}
4256
4257// Currently clearing a set is removing all previous updates to that set
4258//  TODO : Validate if this is correct clearing behavior
4259static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4260    SET_NODE *pSet = getSetNode(my_data, set);
4261    if (!pSet) {
4262        // TODO : Return error
4263    } else {
4264        freeShadowUpdateTree(pSet);
4265    }
4266}
4267
4268static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4269                                VkDescriptorPoolResetFlags flags) {
4270    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4271    if (!pPool) {
4272        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4273                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4274                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4275    } else {
4276        // TODO: validate flags
4277        // For every set off of this pool, clear it
4278        SET_NODE *pSet = pPool->pSets;
4279        while (pSet) {
4280            clearDescriptorSet(my_data, pSet->set);
4281            pSet = pSet->pNext;
4282        }
4283        // Reset available count to max count for this pool
4284        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4285            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4286        }
4287    }
4288}
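
// Illustrative effect (a sketch, not code from this layer): after
//     vkResetDescriptorPool(device, pool, 0);
// every set previously allocated from 'pool' has its shadowed updates freed via
// clearDescriptorSet(), and the pool's per-type available counts are restored to
// their creation-time maximums.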
4289
4290// For given CB object, fetch associated CB Node from map
4291static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4292    if (my_data->commandBufferMap.count(cb) == 0) {
4293        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4294                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4295                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4296        return NULL;
4297    }
4298    return my_data->commandBufferMap[cb];
4299}
4300
4301// Free all CB Nodes
4302// NOTE : Calls to this function should be wrapped in mutex
4303static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
4307    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4308        delete (*ii).second;
4309    }
4310    my_data->commandBufferMap.clear();
4311}
4312
4313static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4314    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4315                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4316                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4317}
4318
4319VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4320    if (!pCB->activeRenderPass)
4321        return VK_FALSE;
4322    VkBool32 skip_call = VK_FALSE;
4323    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4324        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4325                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4326                             "Commands cannot be called in a subpass using secondary command buffers.");
4327    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4328        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4329                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4330                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4331    }
4332    return skip_call;
4333}
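
// Illustrative trigger (a sketch, not code from this layer): within a subpass begun
// using VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, only vkCmdExecuteCommands is legal:
//     vkCmdBeginRenderPass(cb, &rpBegin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdExecuteCommands(cb, 1, &secondaryCB); // OK
//     vkCmdDraw(cb, 3, 1, 0, 0);                 // flagged by the first check above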
4334
4335static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4336    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4337        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4338                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4339                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4340    return false;
4341}
4342
4343static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4344    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4345        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4346                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4347                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4348    return false;
4349}
4350
4351static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4352    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4353        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4354                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
4356    return false;
4357}
4358
4359// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4360//  in the recording state or if there's an issue with the Cmd ordering
4361static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4362    VkBool32 skipCall = VK_FALSE;
4363    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4364    if (pool_data != my_data->commandPoolMap.end()) {
4365        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4366        switch (cmd) {
4367        case CMD_BINDPIPELINE:
4368        case CMD_BINDPIPELINEDELTA:
4369        case CMD_BINDDESCRIPTORSETS:
4370        case CMD_FILLBUFFER:
4371        case CMD_CLEARCOLORIMAGE:
4372        case CMD_SETEVENT:
4373        case CMD_RESETEVENT:
4374        case CMD_WAITEVENTS:
4375        case CMD_BEGINQUERY:
4376        case CMD_ENDQUERY:
4377        case CMD_RESETQUERYPOOL:
4378        case CMD_COPYQUERYPOOLRESULTS:
4379        case CMD_WRITETIMESTAMP:
4380            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4381            break;
4382        case CMD_SETVIEWPORTSTATE:
4383        case CMD_SETSCISSORSTATE:
4384        case CMD_SETLINEWIDTHSTATE:
4385        case CMD_SETDEPTHBIASSTATE:
4386        case CMD_SETBLENDSTATE:
4387        case CMD_SETDEPTHBOUNDSSTATE:
4388        case CMD_SETSTENCILREADMASKSTATE:
4389        case CMD_SETSTENCILWRITEMASKSTATE:
4390        case CMD_SETSTENCILREFERENCESTATE:
4391        case CMD_BINDINDEXBUFFER:
4392        case CMD_BINDVERTEXBUFFER:
4393        case CMD_DRAW:
4394        case CMD_DRAWINDEXED:
4395        case CMD_DRAWINDIRECT:
4396        case CMD_DRAWINDEXEDINDIRECT:
4397        case CMD_BLITIMAGE:
4398        case CMD_CLEARATTACHMENTS:
4399        case CMD_CLEARDEPTHSTENCILIMAGE:
4400        case CMD_RESOLVEIMAGE:
4401        case CMD_BEGINRENDERPASS:
4402        case CMD_NEXTSUBPASS:
4403        case CMD_ENDRENDERPASS:
4404            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4405            break;
4406        case CMD_DISPATCH:
4407        case CMD_DISPATCHINDIRECT:
4408            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4409            break;
4410        case CMD_COPYBUFFER:
4411        case CMD_COPYIMAGE:
4412        case CMD_COPYBUFFERTOIMAGE:
4413        case CMD_COPYIMAGETOBUFFER:
4414        case CMD_CLONEIMAGEDATA:
4415        case CMD_UPDATEBUFFER:
4416        case CMD_PIPELINEBARRIER:
4417        case CMD_EXECUTECOMMANDS:
4418            break;
4419        default:
4420            break;
4421        }
4422    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // init cmd node and append to end of cmd LL
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
4432    return skipCall;
4433}
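
// Typical usage (a sketch, not code from this layer): each vkCmd* entry point
// validates and records its command type through addCmd, e.g.:
//     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
//     if (pCB)
//         skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
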
4434// Reset the command buffer state
4435//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4436static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
4437    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
4438    if (pCB) {
4439        pCB->cmds.clear();
4440        // Reset CB state (note that createInfo is not cleared)
4441        pCB->commandBuffer = cb;
4442        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4443        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4444        pCB->numCmds = 0;
4445        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4446        pCB->state = CB_NEW;
4447        pCB->submitCount = 0;
4448        pCB->status = 0;
4449        pCB->viewports.clear();
4450        pCB->scissors.clear();
4451        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4452            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
4453            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4454                auto set_node = my_data->setMap.find(set);
4455                if (set_node != my_data->setMap.end()) {
4456                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4457                }
4458            }
4459            pCB->lastBound[i].reset();
4460        }
4461        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4462        pCB->activeRenderPass = 0;
4463        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4464        pCB->activeSubpass = 0;
4465        pCB->framebuffer = 0;
4466        pCB->fenceId = 0;
4467        pCB->lastSubmittedFence = VK_NULL_HANDLE;
4468        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
4469        pCB->destroyedSets.clear();
4470        pCB->updatedSets.clear();
4471        pCB->destroyedFramebuffers.clear();
4472        pCB->waitedEvents.clear();
4473        pCB->semaphores.clear();
4474        pCB->events.clear();
4475        pCB->waitedEventsBeforeQueryReset.clear();
4476        pCB->queryToStateMap.clear();
4477        pCB->activeQueries.clear();
4478        pCB->startedQueries.clear();
4479        pCB->imageLayoutMap.clear();
4480        pCB->eventToStageMap.clear();
4481        pCB->drawData.clear();
4482        pCB->currentDrawData.buffers.clear();
4483        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4484        pCB->secondaryCommandBuffers.clear();
4485        pCB->activeDescriptorSets.clear();
4486        pCB->validate_functions.clear();
4487        pCB->pMemObjList.clear();
4488        pCB->eventUpdates.clear();
4489    }
4490}
4491
4492// Set PSO-related status bits for CB, including dynamic state set via PSO
4493static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4494    // Account for any dynamic state not set via this PSO
4495    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
4496        pCB->status = CBSTATUS_ALL;
4497    } else {
4498        // First consider all state on
4499        // Then unset any state that's noted as dynamic in PSO
4500        // Finally OR that into CB statemask
4501        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4502        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
4503            switch (pPipe->dynStateCI.pDynamicStates[i]) {
4504            case VK_DYNAMIC_STATE_VIEWPORT:
4505                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4506                break;
4507            case VK_DYNAMIC_STATE_SCISSOR:
4508                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4509                break;
4510            case VK_DYNAMIC_STATE_LINE_WIDTH:
4511                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4512                break;
4513            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4514                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4515                break;
4516            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4517                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4518                break;
4519            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4520                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4521                break;
4522            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4523                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4524                break;
4525            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4526                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4527                break;
4528            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4529                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4530                break;
4531            default:
4532                // TODO : Flag error here
4533                break;
4534            }
4535        }
4536        pCB->status |= psoDynStateMask;
4537    }
4538}
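
// Example of the masking above: a PSO that declares only VK_DYNAMIC_STATE_VIEWPORT
// yields psoDynStateMask == (CBSTATUS_ALL & ~CBSTATUS_VIEWPORT_SET), so every
// status bit except the viewport bit is OR'd into pCB->status; the viewport bit
// is only set later when vkCmdSetViewport() is recorded.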
4539
4540// Print the last bound Gfx Pipeline
4541static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4542    VkBool32 skipCall = VK_FALSE;
4543    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4544    if (pCB) {
4545        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
4546        if (!pPipeTrav) {
4547            // nothing to print
4548        } else {
4549            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4550                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
4551                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
4552        }
4553    }
4554    return skipCall;
4555}
4556
4557static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4558    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4559    if (pCB && pCB->cmds.size() > 0) {
4560        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4561                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
        const vector<CMD_NODE> &cmds = pCB->cmds;
4563        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4564            // TODO : Need to pass cb as srcObj here
4565            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4566                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4567        }
4568    } else {
4569        // Nothing to print
4570    }
4571}
4572
4573static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4574    VkBool32 skipCall = VK_FALSE;
4575    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4576        return skipCall;
4577    }
4578    skipCall |= printPipeline(my_data, cb);
4579    return skipCall;
4580}
4581
4582// Flags validation error if the associated call is made inside a render pass. The apiName
4583// routine should ONLY be called outside a render pass.
4584static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4585    VkBool32 inside = VK_FALSE;
4586    if (pCB->activeRenderPass) {
4587        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4588                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4589                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4590                         (uint64_t)pCB->activeRenderPass);
4591    }
4592    return inside;
4593}
4594
// Flags a validation error if the associated call is made outside a render pass. The API call
// named by apiName may ONLY be made inside a render pass.
4597static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4598    VkBool32 outside = VK_FALSE;
4599    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4600        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4601         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4602        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4603                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4604                          "%s: This call must be issued inside an active render pass.", apiName);
4605    }
4606    return outside;
4607}
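
// Usage note (a sketch, not code from this layer): commands legal only inside a
// render pass call outsideRenderPass(), and vice versa, e.g.:
//     skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");      // draws must be inside
//     skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer"); // copies must be outside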
4608
4609static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
4610
4611    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");
4612
4613    if (!globalLockInitialized) {
4614        loader_platform_thread_create_mutex(&globalLock);
4615        globalLockInitialized = 1;
4616    }
4617#if MTMERGESOURCE
4618    // Zero out memory property data
4619    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
4620#endif
4621}
4622
4623VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4624vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4625    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4626
4627    assert(chain_info->u.pLayerInfo);
4628    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4629    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4630    if (fpCreateInstance == NULL)
4631        return VK_ERROR_INITIALIZATION_FAILED;
4632
4633    // Advance the link info for the next element on the chain
4634    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4635
4636    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4637    if (result != VK_SUCCESS)
4638        return result;
4639
4640    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4641    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4642    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
4643
4644    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
4645                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4646
4647    init_core_validation(my_data, pAllocator);
4648
4649    ValidateLayerOrdering(*pCreateInfo);
4650
4651    return result;
4652}
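
// Note on the chain pattern above (it recurs in vkCreateDevice below): the loader
// hands each layer a VkLayerInstanceCreateInfo whose pLayerInfo names the next
// link's vkGetInstanceProcAddr; the layer captures that pointer, advances
// pLayerInfo, calls down the chain, and only then initializes its own state.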
4653
4654/* hook DestroyInstance to remove tableInstanceMap entry */
4655VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4656    // TODOSC : Shouldn't need any customization here
4657    dispatch_key key = get_dispatch_key(instance);
4658    // TBD: Need any locking this early, in case this function is called at the
4659    // same time by more than one thread?
4660    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4661    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4662    pTable->DestroyInstance(instance, pAllocator);
4663
4664    loader_platform_thread_lock_mutex(&globalLock);
4665    // Clean up logging callback, if any
4666    while (my_data->logging_callback.size() > 0) {
4667        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4668        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4669        my_data->logging_callback.pop_back();
4670    }
4671
4672    layer_debug_report_destroy_instance(my_data->report_data);
4673    delete my_data->instance_dispatch_table;
4674    layer_data_map.erase(key);
4675    loader_platform_thread_unlock_mutex(&globalLock);
4676    if (layer_data_map.empty()) {
4677        // Release mutex when destroying last instance.
4678        loader_platform_thread_delete_mutex(&globalLock);
4679        globalLockInitialized = 0;
4680    }
4681}
4682
4683static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4684    uint32_t i;
4685    // TBD: Need any locking, in case this function is called at the same time
4686    // by more than one thread?
4687    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4688    dev_data->device_extensions.wsi_enabled = false;
4689
4690    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4691    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4692    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4693    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4694    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4695    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4696    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4697
4698    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4699        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4700            dev_data->device_extensions.wsi_enabled = true;
4701    }
4702}
4703
4704VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4705                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4706    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4707
4708    assert(chain_info->u.pLayerInfo);
4709    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4710    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4711    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4712    if (fpCreateDevice == NULL) {
4713        return VK_ERROR_INITIALIZATION_FAILED;
4714    }
4715
4716    // Advance the link info for the next element on the chain
4717    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4718
4719    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4720    if (result != VK_SUCCESS) {
4721        return result;
4722    }
4723
4724    loader_platform_thread_lock_mutex(&globalLock);
4725    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4726    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4727
4728    // Setup device dispatch table
4729    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4730    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4731
4732    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4733    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4734    // Get physical device limits for this device
4735    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
4736    uint32_t count;
4737    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4738    my_device_data->physDevProperties.queue_family_properties.resize(count);
4739    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4740        gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
4741    // TODO: device limits should make sure these are compatible
4742    if (pCreateInfo->pEnabledFeatures) {
4743        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
4744    } else {
4745        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4746    }
4747    loader_platform_thread_unlock_mutex(&globalLock);
4748
4749    ValidateLayerOrdering(*pCreateInfo);
4750
4751    return result;
4752}
4753
4754// prototype
4755static void deleteRenderPasses(layer_data *);
4756VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4757    // TODOSC : Shouldn't need any customization here
4758    dispatch_key key = get_dispatch_key(device);
4759    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4760    // Free all the memory
4761    loader_platform_thread_lock_mutex(&globalLock);
4762    deletePipelines(dev_data);
4763    deleteRenderPasses(dev_data);
4764    deleteCommandBuffers(dev_data);
4765    deletePools(dev_data);
4766    deleteLayouts(dev_data);
4767    dev_data->imageViewMap.clear();
4768    dev_data->imageMap.clear();
4769    dev_data->imageSubresourceMap.clear();
4770    dev_data->imageLayoutMap.clear();
4771    dev_data->bufferViewMap.clear();
4772    dev_data->bufferMap.clear();
4773    loader_platform_thread_unlock_mutex(&globalLock);
4774#if MTMERGESOURCE
4775    VkBool32 skipCall = VK_FALSE;
4776    loader_platform_thread_lock_mutex(&globalLock);
4777    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4778            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4779    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4780            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4781    print_mem_list(dev_data, device);
4782    printCBList(dev_data, device);
4783    delete_cmd_buf_info_list(dev_data);
4784    // Report any memory leaks
4785    DEVICE_MEM_INFO *pInfo = NULL;
4786    if (dev_data->memObjMap.size() > 0) {
4787        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4788            pInfo = &(*ii).second;
4789            if (pInfo->allocInfo.allocationSize != 0) {
4790                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4791                skipCall |=
4792                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4793                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4794                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
4795                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
4796                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4797            }
4798        }
4799    }
4800    // Queues persist until device is destroyed
4801    delete_queue_info_list(dev_data);
4802    layer_debug_report_destroy_device(device);
4803    loader_platform_thread_unlock_mutex(&globalLock);
4804
4805#if DISPATCH_MAP_DEBUG
4806    fprintf(stderr, "Device: %p, key: %p\n", device, key);
4807#endif
4808    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4809    if (VK_FALSE == skipCall) {
4810        pDisp->DestroyDevice(device, pAllocator);
4811    }
4812#else
4813    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4814#endif
4815    delete dev_data->device_dispatch_table;
4816    layer_data_map.erase(key);
4817}
4818
4819#if MTMERGESOURCE
4820VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
4821vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
4822    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
4823    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
4824    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
4825    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
4826}
4827#endif
4828
4829static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4830
4831VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4832vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4833    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4834}
4835
4836VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4837vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4838    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4839}
4840
4841// TODO: Why does this exist - can we just use global?
4842static const VkLayerProperties cv_device_layers[] = {{
4843    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
4844}};
4845
4846VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4847                                                                                    const char *pLayerName, uint32_t *pCount,
4848                                                                                    VkExtensionProperties *pProperties) {
4849    if (pLayerName == NULL) {
4850        dispatch_key key = get_dispatch_key(physicalDevice);
4851        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4852        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4853    } else {
4854        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4855    }
4856}
4857
4858VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4859vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4860    /* draw_state physical device layers are the same as global */
4861    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4862}
4863
// This validates that the initial image layouts recorded in the command buffer
// match the globally tracked layouts of those images.
4867VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
4868    VkBool32 skip_call = VK_FALSE;
4869    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (!pCB) // getCBNode() has already logged an error for the unknown CB
        return skip_call;
    for (auto cb_image_data : pCB->imageLayoutMap) {
4872        VkImageLayout imageLayout;
4873        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4874            skip_call |=
4875                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4876                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4877                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4878        } else {
4879            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4880                // TODO: Set memory invalid which is in mem_tracker currently
4881            } else if (imageLayout != cb_image_data.second.initialLayout) {
4882                skip_call |=
4883                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4884                            reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4885                            "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
4886                            "first use is %s.",
4887                            reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4888                            string_VkImageLayout(cb_image_data.second.initialLayout));
4889            }
4890            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4891        }
4892    }
4893    return skip_call;
4894}
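
// Illustrative mismatch (a sketch, not code from this layer): if an image was last
// transitioned to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL on the device, but this
// command buffer was recorded expecting it to start in
// VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, the submit is flagged above.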
4895
4896// Track which resources are in-flight by atomically incrementing their "in_use" count
4897VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4898    VkBool32 skip_call = VK_FALSE;
4899    for (auto drawDataElement : pCB->drawData) {
4900        for (auto buffer : drawDataElement.buffers) {
4901            auto buffer_data = my_data->bufferMap.find(buffer);
4902            if (buffer_data == my_data->bufferMap.end()) {
4903                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4904                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4905                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4906            } else {
4907                buffer_data->second.in_use.fetch_add(1);
4908            }
4909        }
4910    }
4911    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4912        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4913            auto setNode = my_data->setMap.find(set);
4914            if (setNode == my_data->setMap.end()) {
4915                skip_call |=
4916                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4917                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4918                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4919            } else {
4920                setNode->second->in_use.fetch_add(1);
4921            }
4922        }
4923    }
4924    for (auto semaphore : pCB->semaphores) {
4925        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4926        if (semaphoreNode == my_data->semaphoreMap.end()) {
4927            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4929                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4930                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4931        } else {
4932            semaphoreNode->second.in_use.fetch_add(1);
4933        }
4934    }
4935    for (auto event : pCB->events) {
4936        auto eventNode = my_data->eventMap.find(event);
4937        if (eventNode == my_data->eventMap.end()) {
4938            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4940                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4941                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
4942        } else {
4943            eventNode->second.in_use.fetch_add(1);
4944        }
4945    }
4946    return skip_call;
4947}
4948
4949void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    if (!pCB)
        return;
4951    for (auto drawDataElement : pCB->drawData) {
4952        for (auto buffer : drawDataElement.buffers) {
4953            auto buffer_data = my_data->bufferMap.find(buffer);
4954            if (buffer_data != my_data->bufferMap.end()) {
4955                buffer_data->second.in_use.fetch_sub(1);
4956            }
4957        }
4958    }
4959    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4960        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4961            auto setNode = my_data->setMap.find(set);
4962            if (setNode != my_data->setMap.end()) {
4963                setNode->second->in_use.fetch_sub(1);
4964            }
4965        }
4966    }
4967    for (auto semaphore : pCB->semaphores) {
4968        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4969        if (semaphoreNode != my_data->semaphoreMap.end()) {
4970            semaphoreNode->second.in_use.fetch_sub(1);
4971        }
4972    }
4973    for (auto event : pCB->events) {
4974        auto eventNode = my_data->eventMap.find(event);
4975        if (eventNode != my_data->eventMap.end()) {
4976            eventNode->second.in_use.fetch_sub(1);
4977        }
4978    }
4979    for (auto queryStatePair : pCB->queryToStateMap) {
4980        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4981    }
4982    for (auto eventStagePair : pCB->eventToStageMap) {
4983        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4984    }
4985}
4986
4987void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4988    for (uint32_t i = 0; i < fenceCount; ++i) {
4989        auto fence_data = my_data->fenceMap.find(pFences[i]);
4990        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4991            return;
4992        fence_data->second.needsSignaled = false;
4993        fence_data->second.in_use.fetch_sub(1);
4994        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
4995        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4996            decrementResources(my_data, cmdBuffer);
4997        }
4998    }
4999}
5000
5001void decrementResources(layer_data *my_data, VkQueue queue) {
5002    auto queue_data = my_data->queueMap.find(queue);
5003    if (queue_data != my_data->queueMap.end()) {
5004        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
5005            decrementResources(my_data, cmdBuffer);
5006        }
5007        queue_data->second.untrackedCmdBuffers.clear();
5008        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
5009    }
5010}
5011
5012void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
5013    if (queue == other_queue) {
5014        return;
5015    }
5016    auto queue_data = dev_data->queueMap.find(queue);
5017    auto other_queue_data = dev_data->queueMap.find(other_queue);
5018    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
5019        return;
5020    }
    // Use a distinct name so the loop variable does not shadow the fence parameter used below
    for (auto priorFence : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(priorFence);
    }
5024    if (fence != VK_NULL_HANDLE) {
5025        auto fence_data = dev_data->fenceMap.find(fence);
5026        if (fence_data == dev_data->fenceMap.end()) {
5027            return;
5028        }
5029        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5030            fence_data->second.cmdBuffers.push_back(cmdbuffer);
5031        }
5032        other_queue_data->second.untrackedCmdBuffers.clear();
5033    } else {
5034        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5035            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
5036        }
5037        other_queue_data->second.untrackedCmdBuffers.clear();
5038    }
5039    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
5040        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
5041    }
5042}
5043
5044void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5045    auto queue_data = my_data->queueMap.find(queue);
5046    if (fence != VK_NULL_HANDLE) {
5047        vector<VkFence> prior_fences;
5048        auto fence_data = my_data->fenceMap.find(fence);
5049        if (fence_data == my_data->fenceMap.end()) {
5050            return;
5051        }
5052        if (queue_data != my_data->queueMap.end()) {
5053            prior_fences = queue_data->second.lastFences;
5054            queue_data->second.lastFences.clear();
5055            queue_data->second.lastFences.push_back(fence);
5056            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
5057                fence_data->second.cmdBuffers.push_back(cmdbuffer);
5058            }
5059            queue_data->second.untrackedCmdBuffers.clear();
5060        }
5061        fence_data->second.cmdBuffers.clear();
5062        fence_data->second.priorFences = prior_fences;
5063        fence_data->second.needsSignaled = true;
5064        fence_data->second.queue = queue;
5065        fence_data->second.in_use.fetch_add(1);
5066        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5067            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5068            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5069                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5070                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
5071                }
5072                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
5073            }
5074        }
5075    } else {
5076        if (queue_data != my_data->queueMap.end()) {
5077            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5078                const VkSubmitInfo *submit = &pSubmits[submit_idx];
5079                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5080                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5081                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
5082                    }
5083                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
5084                }
5085            }
5086        }
5087    }
5088    if (queue_data != my_data->queueMap.end()) {
5089        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5090            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5091            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5092                // Add cmdBuffers to both the global set and queue set
5093                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5094                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
5095                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
5096                }
5097                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5098                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5099            }
5100        }
5101    }
5102}
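
// Summary of the tracking above: with a fence, the submit's primary and secondary
// CBs (plus any untracked CBs already on the queue) are attached to that fence, and
// the queue's prior fences are chained so completion retires them in order; without
// a fence, the CBs are parked on the queue's untrackedCmdBuffers list. In both
// cases every CB is added to the global and per-queue in-flight sets.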
5103
5104bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5105    bool skip_call = false;
5106    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
5107        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5108        skip_call |=
5109            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5110                    __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Command Buffer %#" PRIx64 " is already in use and is not marked "
5111                                                             "for simultaneous use.",
5112                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
5113    }
5114    return skip_call;
5115}
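
// Illustrative trigger (a sketch, not code from this layer): re-submitting a CB
// that is still in flight without the simultaneous-use flag:
//     vkQueueSubmit(queue, 1, &submitInfo, fence1);
//     vkQueueSubmit(queue, 1, &submitInfo, fence2); // flagged unless the CB was begun with
//                                                   // VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT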
5116
5117static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5118    bool skipCall = false;
5119    // Validate that cmd buffers have been updated
5120    if (CB_RECORDED != pCB->state) {
5121        if (CB_INVALID == pCB->state) {
5122            // Inform app of reason CB invalid
5123            bool causeReported = false;
5124            if (!pCB->destroyedSets.empty()) {
5125                std::stringstream set_string;
5126                for (auto set : pCB->destroyedSets)
5127                    set_string << " " << set;
5128
5129                skipCall |=
5130                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5131                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5132                            "You are submitting command buffer %#" PRIxLEAST64
5133                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
5134                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5135                causeReported = true;
5136            }
5137            if (!pCB->updatedSets.empty()) {
5138                std::stringstream set_string;
5139                for (auto set : pCB->updatedSets)
5140                    set_string << " " << set;
5141
5142                skipCall |=
5143                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5144                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5145                            "You are submitting command buffer %#" PRIxLEAST64
5146                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
5147                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5148                causeReported = true;
5149            }
5150            if (!pCB->destroyedFramebuffers.empty()) {
5151                std::stringstream fb_string;
5152                for (auto fb : pCB->destroyedFramebuffers)
5153                    fb_string << " " << fb;
5154
5155                skipCall |=
5156                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5157                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5158                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
5159                            "referenced framebuffers destroyed: %s",
5160                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
5161                causeReported = true;
5162            }
5163            // TODO : This is defensive programming to make sure an error is
5164            //  flagged if we hit this INVALID cmd buffer case and none of the
5165            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
5167            if (!causeReported) {
5168                skipCall |= log_msg(
5169                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5170                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5171                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
5172                    "should "
5173                    "be improved to report the exact cause.",
5174                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5175            }
5176        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5177            skipCall |=
5178                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5179                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5180                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5181                        (uint64_t)(pCB->commandBuffer));
5182        }
5183    }
5184    return skipCall;
5185}
5186
5187static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5188    // Track in-use for resources off of primary and any secondary CBs
5189    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
5190    if (!pCB->secondaryCommandBuffers.empty()) {
5191        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5192            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5193            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5194            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5196                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5197                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5198                        " but that buffer has subsequently been bound to "
5199                        "primary cmd buffer %#" PRIxLEAST64 ".",
5200                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5201                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5202            }
5203        }
5204    }
5205    // TODO : Verify if this also needs to be checked for secondary command
5206    //  buffers. If so, this block of code can move to
5207    //   validateCommandBufferState() function. vulkan GL106 filed to clarify
5208    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5209        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5210                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5211                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
5212                            "set, but has been submitted %#" PRIxLEAST64 " times.",
5213                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5214    }
5215    skipCall |= validateCommandBufferState(dev_data, pCB);
5216    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5217    // on device
5218    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5219    return skipCall;
5220}
5221
5222VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5223vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5224    VkBool32 skipCall = VK_FALSE;
5225    GLOBAL_CB_NODE *pCBNode = NULL;
5226    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5227    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5228    loader_platform_thread_lock_mutex(&globalLock);
5229#if MTMERGESOURCE
5230    // TODO : Need to track fence and clear mem references when fence clears
5231    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5232    uint64_t fenceId = 0;
5233    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5234
5235    print_mem_list(dev_data, queue);
5236    printCBList(dev_data, queue);
5237    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5238        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5239        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5240            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5241            if (pCBNode) {
5242                pCBNode->fenceId = fenceId;
5243                pCBNode->lastSubmittedFence = fence;
5244                pCBNode->lastSubmittedQueue = queue;
5245                for (auto &function : pCBNode->validate_functions) {
5246                    skipCall |= function();
5247                }
5248                for (auto &function : pCBNode->eventUpdates) {
5249                    skipCall |= static_cast<VkBool32>(function(queue));
5250                }
5251            }
5252        }
5253
5254        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5255            VkSemaphore sem = submit->pWaitSemaphores[i];
5256
5257            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5258                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
5259                    skipCall |=
5260                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5261                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
5262                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5263                }
5264                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5265            }
5266        }
5267        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5268            VkSemaphore sem = submit->pSignalSemaphores[i];
5269
5270            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5271                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
5272                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5273                                       VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
5274                                       "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5275                }
5276                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5277            }
5278        }
5279    }
5280#endif
5281    // First verify that fence is not in use
5282    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5283        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5284                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5285                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5286    }
5287    // Now verify each individual submit
5288    std::unordered_set<VkQueue> processed_other_queues;
5289    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5290        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5291        vector<VkSemaphore> semaphoreList;
5292        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5293            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5294            semaphoreList.push_back(semaphore);
5295            if (dev_data->semaphoreMap[semaphore].signaled) {
5296                dev_data->semaphoreMap[semaphore].signaled = 0;
5297            } else {
5298                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5299                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5300                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5301                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5302            }
5303            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5304            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5305                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5306                processed_other_queues.insert(other_queue);
5307            }
5308        }
5309        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5310            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5311            semaphoreList.push_back(semaphore);
5312            if (dev_data->semaphoreMap[semaphore].signaled) {
5313                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5314                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5315                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5316                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5317                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5318                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5319            } else {
5320                dev_data->semaphoreMap[semaphore].signaled = 1;
5321                dev_data->semaphoreMap[semaphore].queue = queue;
5322            }
5323        }
5324        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5325            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5326            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (!pCBNode) // defensively skip command buffers this layer is not tracking, as the loop above does
                continue;
5327            pCBNode->semaphores = semaphoreList;
5328            pCBNode->submitCount++; // increment submit count
5329            skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5330        }
5331    }
5332    // Update cmdBuffer-related data structs and mark fence in-use
5333    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5334    loader_platform_thread_unlock_mutex(&globalLock);
5335    if (VK_FALSE == skipCall)
5336        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5337#if MTMERGESOURCE
5338    loader_platform_thread_lock_mutex(&globalLock);
5339    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5340        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5341        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5342            VkSemaphore sem = submit->pWaitSemaphores[i];
5343
5344            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5345                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
5346            }
5347        }
5348    }
5349    loader_platform_thread_unlock_mutex(&globalLock);
5350#endif
5351    return result;
5352}
5353
5354#if MTMERGESOURCE
5355VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5356                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5357    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5358    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5359    // TODO : Track allocations and overall size here
5360    loader_platform_thread_lock_mutex(&globalLock);
5361    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5362    print_mem_list(my_data, device);
5363    loader_platform_thread_unlock_mutex(&globalLock);
5364    return result;
5365}
5366
5367VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5368vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5369    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5370
5371    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5372    // Before freeing a memory object, an application must ensure the memory object is no longer
5373    // in use by the device—for example by command buffers queued for execution. The memory need
5374    // not yet be unbound from all images and buffers, but any further use of those images or
5375    // buffers (on host or device) for anything other than destroying those objects will result in
5376    // undefined behavior.
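    //
    // Illustrative application-side ordering (a sketch, not layer code; the fence
    // name is hypothetical):
    //     vkWaitForFences(device, 1, &submitFence, VK_TRUE, UINT64_MAX); // or vkDeviceWaitIdle(device)
    //     vkFreeMemory(device, mem, NULL); // bound buffers/images may still exist,
    //                                      // but may now only be destroyed, not used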
5377
5378    loader_platform_thread_lock_mutex(&globalLock);
5379    freeMemObjInfo(my_data, device, mem, VK_FALSE);
5380    print_mem_list(my_data, device);
5381    printCBList(my_data, device);
5382    loader_platform_thread_unlock_mutex(&globalLock);
5383    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5384}
5385
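// Validate a vkMapMemory request against tracked state: warn on zero-size maps, and
// flag maps of already-mapped objects or ranges that fall outside the object's
// allocationSize (including the offset check for VK_WHOLE_SIZE).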
5386VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5387    VkBool32 skipCall = VK_FALSE;
5388
5389    if (size == 0) {
5390        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5391        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5392                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5393                           "VkMapMemory: Attempting to map memory range of size zero");
5394    }
5395
5396    auto mem_element = my_data->memObjMap.find(mem);
5397    if (mem_element != my_data->memObjMap.end()) {
5398        // It is an application error to call VkMapMemory on an object that is already mapped
5399        if (mem_element->second.memRange.size != 0) {
5400            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5401                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5402                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5403        }
5404
5405        // Validate that offset + size is within object's allocationSize
5406        if (size == VK_WHOLE_SIZE) {
5407            if (offset >= mem_element->second.allocInfo.allocationSize) {
5408                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5409                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5410                                   "MEM", "Mapping memory with VK_WHOLE_SIZE at offset %" PRIu64 " which is not less than the "
5411                                   "total allocation size %" PRIu64, offset, mem_element->second.allocInfo.allocationSize);
5412            }
5413        } else {
5414            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5415                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5416                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5417                                   "MEM", "Mapping memory from %" PRIu64 " to %" PRIu64 " exceeds the total allocation size %" PRIu64, offset,
5418                                   size + offset, mem_element->second.allocInfo.allocationSize);
5419            }
5420        }
5421    }
5422    return skipCall;
5423}
5424
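// Record the currently mapped (offset, size) range for this memory object so later
// map/unmap calls can be validated against it.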
5425void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5426    auto mem_element = my_data->memObjMap.find(mem);
5427    if (mem_element != my_data->memObjMap.end()) {
5428        MemRange new_range;
5429        new_range.offset = offset;
5430        new_range.size = size;
5431        mem_element->second.memRange = new_range;
5432    }
5433}
5434
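// Called at vkUnmapMemory time: flag unmapping memory that was never mapped, then
// clear the tracked range and release any shadow buffer used for bounds tracking.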
5435VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5436    VkBool32 skipCall = VK_FALSE;
5437    auto mem_element = my_data->memObjMap.find(mem);
5438    if (mem_element != my_data->memObjMap.end()) {
5439        if (!mem_element->second.memRange.size) {
5440            // Valid Usage: memory must currently be mapped
5441            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5442                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5443                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5444        }
5445        mem_element->second.memRange.size = 0;
5446        if (mem_element->second.pData) {
5447            free(mem_element->second.pData);
5448            mem_element->second.pData = 0;
5449        }
5450    }
5451    return skipCall;
5452}
5453
5454static const char NoncoherentMemoryFillValue = 0xb; // fill pattern for the guard bands around non-coherent mappings
5455
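// For HOST_COHERENT memory the driver pointer is returned unchanged. For
// non-coherent memory a shadow buffer of twice the mapped size is allocated and
// filled with NoncoherentMemoryFillValue, and the pointer handed back to the app is
// offset into it, leaving guard bands of size/2 on either side of the app-visible
// range so out-of-bounds writes can be detected later.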
5456void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5457    auto mem_element = my_data->memObjMap.find(mem);
5458    if (mem_element != my_data->memObjMap.end()) {
5459        mem_element->second.pDriverData = *ppData;
5460        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5461        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5462            mem_element->second.pData = 0;
5463        } else {
5464            if (size == VK_WHOLE_SIZE) {
5465                size = mem_element->second.allocInfo.allocationSize;
5466            }
5467            size_t convSize = (size_t)(size);
5468            mem_element->second.pData = malloc(2 * convSize);
5469            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5470            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5471        }
5472    }
5473}
5474#endif
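// When a command buffer retires, verify that every event that guarded one of its
// query resets was signaled; otherwise the affected query results are invalid.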
5475// Note: This function assumes that the global lock is held by the calling
5476// thread.
5477VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5478    VkBool32 skip_call = VK_FALSE;
5479    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5480    if (pCB) {
5481        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
5482            for (auto event : queryEventsPair.second) {
5483                if (my_data->eventMap[event].needsSignaled) {
5484                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5485                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5486                                         "Cannot get query results on queryPool %" PRIu64
5487                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
5488                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
5489                }
5490            }
5491        }
5492    }
5493    return skip_call;
5494}
5495// Remove given cmd_buffer from the global inFlight set.
5496//  Also, if the given queue is valid, remove the cmd_buffer from that queue's
5497//  inFlightCmdBuffers set. Finally, check all other queues and if the given cmd_buffer
5498//  is still in flight on another queue, add it back into the global set.
5499// Note: This function assumes that the global lock is held by the calling
5500// thread.
5501static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
5502    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
5503    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5504    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5505        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
5506        for (auto q : dev_data->queues) {
5507            if ((q != queue) &&
5508                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
5509                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
5510                break;
5511            }
5512        }
5513    }
5514}
5515#if MTMERGESOURCE
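// Warn when an app checks or waits on a fence that is already in the SIGNALED
// state, or on one that was never submitted on a queue or to an acquire call.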
5516static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
5517    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5518    VkBool32 skipCall = false;
5519    auto pFenceInfo = my_data->fenceMap.find(fence);
5520    if (pFenceInfo != my_data->fenceMap.end()) {
5521        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
5522            // The enclosing check already guarantees firstTimeFlag != VK_TRUE here
5523            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5524                skipCall |=
5525                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5526                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5527                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5528            }
5529            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5530                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5531                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5532                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or in "
5533                                    "a call to vkAcquireNextImageKHR.",
5534                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5535            }
5536        } else {
5537            pFenceInfo->second.firstTimeFlag = VK_FALSE;
5538        }
5539    }
5540    return skipCall;
5541}
5542#endif
5543VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5544vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5545    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5546    VkBool32 skip_call = VK_FALSE;
5547#if MTMERGESOURCE
5548    // Verify fence status of submitted fences
5549    loader_platform_thread_lock_mutex(&globalLock);
5550    for (uint32_t i = 0; i < fenceCount; i++) {
5551        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
5552    }
5553    loader_platform_thread_unlock_mutex(&globalLock);
5554    if (skip_call)
5555        return VK_ERROR_VALIDATION_FAILED_EXT;
5556#endif
5557    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5558
5559    if (result == VK_SUCCESS) {
5560        loader_platform_thread_lock_mutex(&globalLock);
5561        // When we know that all fences are complete we can clean/remove their CBs
5562        if (waitAll || fenceCount == 1) {
5563            for (uint32_t i = 0; i < fenceCount; ++i) {
5564#if MTMERGESOURCE
5565                update_fence_tracking(dev_data, pFences[i]);
5566#endif
5567                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
5568                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
5569                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5570                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5571                }
5572            }
5573            decrementResources(dev_data, fenceCount, pFences);
5574        }
5575        // NOTE : Alternate case not handled here is when some fences have completed. In
5576        //  this case for app to guarantee which fences completed it will have to call
5577        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5578        loader_platform_thread_unlock_mutex(&globalLock);
5579    }
5580    if (VK_FALSE != skip_call)
5581        return VK_ERROR_VALIDATION_FAILED_EXT;
5582    return result;
5583}
5584
5585VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5586    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5587    bool skipCall = false;
5588    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5589#if MTMERGESOURCE
5590    loader_platform_thread_lock_mutex(&globalLock);
5591    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
5592    loader_platform_thread_unlock_mutex(&globalLock);
5593    if (skipCall)
5594        return result;
5595#endif
5596    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5597    VkBool32 skip_call = VK_FALSE;
5598    loader_platform_thread_lock_mutex(&globalLock);
5599    if (result == VK_SUCCESS) {
5600#if MTMERGESOURCE
5601        update_fence_tracking(dev_data, fence);
5602#endif
5603        auto fence_queue = dev_data->fenceMap[fence].queue;
5604        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
5605            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5606            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5607        }
5608        decrementResources(dev_data, 1, &fence);
5609    }
5610    loader_platform_thread_unlock_mutex(&globalLock);
5611    if (VK_FALSE != skip_call)
5612        return VK_ERROR_VALIDATION_FAILED_EXT;
5613    return result;
5614}
5615
5616VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5617vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
5618    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5619    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5620    loader_platform_thread_lock_mutex(&globalLock);
5621    dev_data->queues.push_back(*pQueue);
5622    QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5623    pQNode->device = device;
5624#if MTMERGESOURCE
5625    pQNode->lastRetiredId = 0;
5626    pQNode->lastSubmittedId = 0;
5627#endif
5628    loader_platform_thread_unlock_mutex(&globalLock);
5629}
5630
5631VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
5632    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5633    decrementResources(dev_data, queue);
5634    VkBool32 skip_call = VK_FALSE;
5635    loader_platform_thread_lock_mutex(&globalLock);
5636    // Iterate over a local copy since removeInFlightCmdBuffer() erases members of the original set
5637    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
5638    for (auto cmdBuffer : local_cb_set) {
5639        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5640        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
5641    }
5642    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5643    loader_platform_thread_unlock_mutex(&globalLock);
5644    if (VK_FALSE != skip_call)
5645        return VK_ERROR_VALIDATION_FAILED_EXT;
5646    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5647#if MTMERGESOURCE
5648    if (VK_SUCCESS == result) {
5649        loader_platform_thread_lock_mutex(&globalLock);
5650        retire_queue_fences(dev_data, queue);
5651        loader_platform_thread_unlock_mutex(&globalLock);
5652    }
5653#endif
5654    return result;
5655}
5656
5657VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5658    VkBool32 skip_call = VK_FALSE;
5659    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5660    loader_platform_thread_lock_mutex(&globalLock);
5661    for (auto queue : dev_data->queues) {
5662        decrementResources(dev_data, queue);
5663        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5664            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
5665            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5666        }
5667    }
5668    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5669        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5670    }
5671    dev_data->globalInFlightCmdBuffers.clear();
5672    loader_platform_thread_unlock_mutex(&globalLock);
5673    if (VK_FALSE != skip_call)
5674        return VK_ERROR_VALIDATION_FAILED_EXT;
5675    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5676#if MTMERGESOURCE
5677    if (VK_SUCCESS == result) {
5678        loader_platform_thread_lock_mutex(&globalLock);
5679        retire_device_fences(dev_data, device);
5680        loader_platform_thread_unlock_mutex(&globalLock);
5681    }
5682#endif
5683    return result;
5684}
5685
5686VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5687    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5688    bool skipCall = false;
5689    loader_platform_thread_lock_mutex(&globalLock);
5690    if (dev_data->fenceMap[fence].in_use.load()) {
5691        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5692                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5693                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5694    }
5695#if MTMERGESOURCE
5696    delete_fence_info(dev_data, fence);
5697    auto item = dev_data->fenceMap.find(fence);
5698    if (item != dev_data->fenceMap.end()) {
5699        dev_data->fenceMap.erase(item);
5700    }
5701#endif
5702    loader_platform_thread_unlock_mutex(&globalLock);
5703    if (!skipCall)
5704        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5705}
5706
5707VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5708vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5709    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5710    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5711    loader_platform_thread_lock_mutex(&globalLock);
5712    auto item = dev_data->semaphoreMap.find(semaphore);
5713    if (item != dev_data->semaphoreMap.end()) {
5714        if (item->second.in_use.load()) {
5715            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5716                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5717                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5718        }
5719        dev_data->semaphoreMap.erase(semaphore);
5720    }
5721    loader_platform_thread_unlock_mutex(&globalLock);
5722    // TODO : Clean up any internal data structures using this obj.
5723}
5724
5725VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5726    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5727    bool skip_call = false;
5728    loader_platform_thread_lock_mutex(&globalLock);
5729    auto event_data = dev_data->eventMap.find(event);
5730    if (event_data != dev_data->eventMap.end()) {
5731        if (event_data->second.in_use.load()) {
5732            skip_call |= log_msg(
5733                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
5734                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5735                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5736        }
5737        dev_data->eventMap.erase(event_data);
5738    }
5739    loader_platform_thread_unlock_mutex(&globalLock);
5740    if (!skip_call)
5741        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5742    // TODO : Clean up any internal data structures using this obj.
5743}
5744
5745VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5746vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5747    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5748        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5749    // TODO : Clean up any internal data structures using this obj.
5750}
5751
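// Before fetching results, classify each requested query against the in-flight
// command buffers and the availability map: in-flight queries are only legal when
// guarded by waited-on events or requested with the WAIT/PARTIAL flags, while
// unavailable or uninitialized queries are flagged as errors.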
5752VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5753                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5754                                                     VkQueryResultFlags flags) {
5755    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5756    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5757    GLOBAL_CB_NODE *pCB = nullptr;
5758    loader_platform_thread_lock_mutex(&globalLock);
5759    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5760        pCB = getCBNode(dev_data, cmdBuffer);
5761        for (auto queryStatePair : pCB->queryToStateMap) {
5762            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5763        }
5764    }
5765    VkBool32 skip_call = VK_FALSE;
5766    for (uint32_t i = 0; i < queryCount; ++i) {
5767        QueryObject query = {queryPool, firstQuery + i};
5768        auto queryElement = queriesInFlight.find(query);
5769        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5772        // Available and in flight
5773        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5774            queryToStateElement->second) {
5775            for (auto cmdBuffer : queryElement->second) {
5776                pCB = getCBNode(dev_data, cmdBuffer);
5777                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5778                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5779                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5780                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5781                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5782                                         (uint64_t)(queryPool), firstQuery + i);
5783                } else {
5784                    for (auto event : queryEventElement->second) {
5785                        dev_data->eventMap[event].needsSignaled = true;
5786                    }
5787                }
5788            }
5789            // Unavailable and in flight
5790        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5791                   !queryToStateElement->second) {
5792            // TODO : Can there be the same query in use by multiple command buffers in flight?
5793            bool make_available = false;
5794            for (auto cmdBuffer : queryElement->second) {
5795                pCB = getCBNode(dev_data, cmdBuffer);
5796                make_available |= pCB->queryToStateMap[query];
5797            }
5798            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5799                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5800                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5801                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5802                                     (uint64_t)(queryPool), firstQuery + i);
5803            }
5804            // Unavailable
5805        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5806            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5807                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5808                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5809                                 (uint64_t)(queryPool), firstQuery + i);
5810            // Uninitialized
5811        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5812            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5813                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5814                                 "Cannot get query results on queryPool %" PRIu64 " with index %d as data has not been collected for this index.",
5815                                 (uint64_t)(queryPool), firstQuery + i);
5816        }
5817    }
5818    loader_platform_thread_unlock_mutex(&globalLock);
5819    if (skip_call)
5820        return VK_ERROR_VALIDATION_FAILED_EXT;
5821    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5822                                                                flags);
5823}
5824
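// Verify a buffer is actually tracked and not still referenced by an in-flight
// command buffer before it is destroyed.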
5825VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5826    VkBool32 skip_call = VK_FALSE;
5827    auto buffer_data = my_data->bufferMap.find(buffer);
5828    if (buffer_data == my_data->bufferMap.end()) {
5829        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5830                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5831                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5832    } else {
5833        if (buffer_data->second.in_use.load()) {
5834            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5835                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5836                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5837        }
5838    }
5839    return skip_call;
5840}
5841
5842VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5843vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5844    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5845    VkBool32 skipCall = VK_FALSE;
5846    loader_platform_thread_lock_mutex(&globalLock);
5847#if MTMERGESOURCE
5848    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
5849    if (item != dev_data->bufferBindingMap.end()) {
5850        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5851        dev_data->bufferBindingMap.erase(item);
5852    }
5853#endif
5854    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
5855        loader_platform_thread_unlock_mutex(&globalLock);
5856        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5857        loader_platform_thread_lock_mutex(&globalLock);
5858    }
5859    dev_data->bufferMap.erase(buffer);
5860    loader_platform_thread_unlock_mutex(&globalLock);
5861}
5862
5863VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5864vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5865    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5866    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5867    loader_platform_thread_lock_mutex(&globalLock);
5868    auto item = dev_data->bufferViewMap.find(bufferView);
5869    if (item != dev_data->bufferViewMap.end()) {
5870        dev_data->bufferViewMap.erase(item);
5871    }
5872    loader_platform_thread_unlock_mutex(&globalLock);
5873}
5874
5875VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5876    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5877    VkBool32 skipCall = VK_FALSE;
5878#if MTMERGESOURCE
5879    loader_platform_thread_lock_mutex(&globalLock);
5880    auto item = dev_data->imageBindingMap.find((uint64_t)image);
5881    if (item != dev_data->imageBindingMap.end()) {
5882        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5883        dev_data->imageBindingMap.erase(item);
5884    }
5885    loader_platform_thread_unlock_mutex(&globalLock);
5886#endif
5887    if (VK_FALSE == skipCall)
5888        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5889
5890    loader_platform_thread_lock_mutex(&globalLock);
5891    const auto& entry = dev_data->imageMap.find(image);
5892    if (entry != dev_data->imageMap.end()) {
5893        // Clear any memory mapping for this image
5894        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
5895        if (mem_entry != dev_data->memObjMap.end())
5896            mem_entry->second.image = VK_NULL_HANDLE;
5897
5898        // Remove image from imageMap
5899        dev_data->imageMap.erase(entry);
5900    }
5901    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5902    if (subEntry != dev_data->imageSubresourceMap.end()) {
5903        for (const auto& pair : subEntry->second) {
5904            dev_data->imageLayoutMap.erase(pair);
5905        }
5906        dev_data->imageSubresourceMap.erase(subEntry);
5907    }
5908    loader_platform_thread_unlock_mutex(&globalLock);
5909}
5910#if MTMERGESOURCE
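// Report a buffer/image aliasing error; the message direction depends on whether
// the newly bound object (object_handle) is the buffer or the image.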
5911VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5912                                  VkDebugReportObjectTypeEXT object_type) {
5913    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5914        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5915                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
5916                       other_handle);
5917    } else {
5918        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5919                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
5920                       other_handle);
5921    }
5922}
5923
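// Compare a new binding's [start, end] range against all existing ranges bound to
// the same memory. Endpoints are masked down to bufferImageGranularity (assumed to
// be a power of two, as the mask arithmetic requires), so two resources only pass
// if they occupy disjoint granules. For example, assuming a granularity of 0x400:
// [0x000, 0x3FF] and [0x400, 0x7FF] do not alias, but [0x000, 0x401] reaches into
// the granule at 0x400 and is reported against any range starting there.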
5924VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5925                               VkDebugReportObjectTypeEXT object_type) {
5926    VkBool32 skip_call = false;
5927
5928    for (const auto &range : ranges) {
5929        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
5930            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5931            continue;
5932        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
5933            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5934            continue;
5935        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5936    }
5937    return skip_call;
5938}
5939
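// Record the new binding's memory range in its own list, then check it for aliasing
// against the ranges already bound for the other resource type.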
5940VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5941                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
5942                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
5943    MEMORY_RANGE range;
5944    range.handle = handle;
5945    range.memory = mem;
5946    range.start = memoryOffset;
5947    range.end = memoryOffset + memRequirements.size - 1;
5948    ranges.push_back(range);
5949    return validate_memory_range(dev_data, other_ranges, range, object_type);
5950}
5951
5952VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5953vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5954    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5955    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5956    loader_platform_thread_lock_mutex(&globalLock);
5957    // Track objects tied to memory
5958    uint64_t buffer_handle = (uint64_t)(buffer);
5959    VkBool32 skipCall =
5960        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5961    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
5962    {
5963        VkMemoryRequirements memRequirements;
5964        // Call straight down the chain instead of re-entering this layer through the loader trampoline
5965        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5966        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
5967                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
5968                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5969    }
5970    print_mem_list(dev_data, device);
5971    loader_platform_thread_unlock_mutex(&globalLock);
5972    if (VK_FALSE == skipCall) {
5973        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5974    }
5975    return result;
5976}
5977
5978VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5979vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5980    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5981    // TODO : What to track here?
5982    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5983    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5984}
5985
5986VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5987vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5988    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5989    // TODO : What to track here?
5990    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5991    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5992}
5993#endif
5994VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5995vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5996    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5997        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5998    // TODO : Clean up any internal data structures using this obj.
5999}
6000
6001VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6002vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6003    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6004
6005    loader_platform_thread_lock_mutex(&globalLock);
6006
6007    my_data->shaderModuleMap.erase(shaderModule);
6008
6009    loader_platform_thread_unlock_mutex(&globalLock);
6010
6011    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
6012}
6013
6014VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6015vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6016    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
6017    // TODO : Clean up any internal data structures using this obj.
6018}
6019
6020VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6021vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6022    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6023        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6024    // TODO : Clean up any internal data structures using this obj.
6025}
6026
6027VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6028vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6029    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
6030    // TODO : Clean up any internal data structures using this obj.
6031}
6032
6033VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6034vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6035    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6036        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6037    // TODO : Clean up any internal data structures using this obj.
6038}
6039
6040VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6041vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6042    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6043        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
6044    // TODO : Clean up any internal data structures using this obj.
6045}
6046
6047VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6048vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6049    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6050
6051    bool skip_call = false;
6052    loader_platform_thread_lock_mutex(&globalLock);
6053    for (uint32_t i = 0; i < commandBufferCount; i++) {
6054#if MTMERGESOURCE
6055        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
6056#endif
6057        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
6058            skip_call |=
6059                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6060                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6061                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
6062                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
6063        }
6064        // Delete CB information structure, and remove from commandBufferMap
6065        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
6066        if (cb != dev_data->commandBufferMap.end()) {
6067            // reset prior to delete for data clean-up
6068            resetCB(dev_data, (*cb).second->commandBuffer);
6069            delete (*cb).second;
6070            dev_data->commandBufferMap.erase(cb);
6071        }
6072
6073        // Remove commandBuffer reference from commandPoolMap
6074        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
6075    }
6076#if MTMERGESOURCE
6077    printCBList(dev_data, device);
6078#endif
6079    loader_platform_thread_unlock_mutex(&globalLock);
6080
6081    if (!skip_call)
6082        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6083}
6084
6085VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6086                                                                   const VkAllocationCallbacks *pAllocator,
6087                                                                   VkCommandPool *pCommandPool) {
6088    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6089
6090    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6091
6092    if (VK_SUCCESS == result) {
6093        loader_platform_thread_lock_mutex(&globalLock);
6094        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6095        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6096        loader_platform_thread_unlock_mutex(&globalLock);
6097    }
6098    return result;
6099}
6100
6101VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6102                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6103
6104    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6105    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6106    if (result == VK_SUCCESS) {
6107        loader_platform_thread_lock_mutex(&globalLock);
6108        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
6109        loader_platform_thread_unlock_mutex(&globalLock);
6110    }
6111    return result;
6112}
6113
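// Flag an error if any command buffer allocated from this pool is still in the
// global in-flight set; used before resetting or destroying the pool.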
6114VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
6115    VkBool32 skipCall = VK_FALSE;
6116    auto pool_data = dev_data->commandPoolMap.find(commandPool);
6117    if (pool_data != dev_data->commandPoolMap.end()) {
6118        for (auto cmdBuffer : pool_data->second.commandBuffers) {
6119            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
6120                skipCall |=
6121                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
6122                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
6123                            "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
6124                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
6125            }
6126        }
6127    }
6128    return skipCall;
6129}
6130
6131// Destroy commandPool along with all of the commandBuffers allocated from that pool
6132VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6133vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6134    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6135    bool commandBufferComplete = false;
6136    bool skipCall = false;
6137    loader_platform_thread_lock_mutex(&globalLock);
    // Check for in-flight command buffers up front, before the pool tracking state is torn down below
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        return;
    }
6138#if MTMERGESOURCE
6139    // Verify that command buffers in pool are complete (not in-flight)
6140    // MTMTODO : Merge this with code below (separate *NotInUse() call)
6141    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6142         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
6143        commandBufferComplete = false;
6144        skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
6145        if (!commandBufferComplete) {
6146            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6147                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6148                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
6149                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
6150                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
6151        }
6152    }
6153#endif
6154    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap
6155    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
6156        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6157             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
6158            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
6159            delete (*del_cb).second;                  // delete CB info structure
6160            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
6161            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
6162                poolCb); // Remove CB reference from commandPoolMap's list
6163        }
6164    }
6165    dev_data->commandPoolMap.erase(commandPool);
6166
6167    loader_platform_thread_unlock_mutex(&globalLock);
6168
6169    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
6170        return;
6171
6172    if (!skipCall)
6173        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
6174#if MTMERGESOURCE
6175    loader_platform_thread_lock_mutex(&globalLock);
6176    auto item = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6177    // Remove command buffers from command buffer map
6178    while (item != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6179        auto del_item = item++;
6180        delete_cmd_buf_info(dev_data, commandPool, *del_item);
6181    }
6182    dev_data->commandPoolMap.erase(commandPool);
6183    loader_platform_thread_unlock_mutex(&globalLock);
6184#endif
6185}
6186
6187VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6188vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6189    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6190    bool commandBufferComplete = false;
6191    bool skipCall = false;
6192    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6193#if MTMERGESOURCE
6194    // MTMTODO : Merge this with *NotInUse() call below
6195    loader_platform_thread_lock_mutex(&globalLock);
6196    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6197    // Verify that CB's in pool are complete (not in-flight)
6198    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6199        skipCall |= checkCBCompleted(dev_data, (*it), &commandBufferComplete);
6200        if (!commandBufferComplete) {
6201            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6202                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6203                                "Resetting CB %p before it has completed. You must check CB "
6204                                "fence status before calling vkResetCommandPool().",
6205                                (*it));
6206        } else {
6207            // Clear memory references at this point.
6208            clear_cmd_buf_and_mem_references(dev_data, (*it));
6209        }
6210        ++it;
6211    }
6212    loader_platform_thread_unlock_mutex(&globalLock);
6213#endif
6214    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
6215        return VK_ERROR_VALIDATION_FAILED_EXT;
6216
6217    if (!skipCall)
6218        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
6219
6220    // Reset all of the CBs allocated from this pool
6221    if (VK_SUCCESS == result) {
6222        loader_platform_thread_lock_mutex(&globalLock);
6223        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6224        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6225            resetCB(dev_data, (*it));
6226            ++it;
6227        }
6228        loader_platform_thread_unlock_mutex(&globalLock);
6229    }
6230    return result;
6231}
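
// Illustrative app-side usage for the in-flight check above (hypothetical handles; not
// part of the layer): a pool should only be reset once every command buffer allocated
// from it has completed, which is typically guaranteed by waiting on the submit fence.
#if 0 // example sketch only, not compiled
static void SafeResetCommandPool(VkDevice device, VkCommandPool pool, VkFence submitFence) {
    // Block until the prior submission that used this pool's command buffers completes
    vkWaitForFences(device, 1, &submitFence, VK_TRUE, UINT64_MAX);
    // No CB from the pool is now in-flight, so the checkCBCompleted() loop above passes
    vkResetCommandPool(device, pool, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);
}
#endif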
6232
6233VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6234    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6235    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6236    bool skipCall = false;
6237    loader_platform_thread_lock_mutex(&globalLock);
6238    for (uint32_t i = 0; i < fenceCount; ++i) {
6239#if MTMERGESOURCE
6240        // Reset fence state in fenceCreateInfo structure
6241        // MTMTODO : Merge with code below
6242        auto fence_item = dev_data->fenceMap.find(pFences[i]);
6243        if (fence_item != dev_data->fenceMap.end()) {
6244            // Validate fences in SIGNALED state
6245            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
6246                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
6247                skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6248                                   (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
6249                                   "Fence %#" PRIxLEAST64 " submitted to vkResetFences() in UNSIGNALED state", (uint64_t)pFences[i]);
6250            } else {
6251                fence_item->second.createInfo.flags =
6252                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
6253            }
6254        }
6255#endif
6256        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
6257            skipCall |=
6258                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6259                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6260                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
6261        }
6262    }
6263    loader_platform_thread_unlock_mutex(&globalLock);
6264    if (!skipCall)
6265        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6266    return result;
6267}
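
// Illustrative app-side ordering for the checks above (hypothetical handles): wait for
// the fence first so it is signaled and no longer in use, then reset it for reuse.
#if 0 // example sketch only, not compiled
static void ReuseFence(VkDevice device, VkFence fence) {
    vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // fence is now SIGNALED and idle
    vkResetFences(device, 1, &fence);                        // safe: signaled and not in use
}
#endif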
6268
6269VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6270vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6271    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6272    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
6273    if (fbNode != dev_data->frameBufferMap.end()) {
6274        for (auto cb : fbNode->second.referencingCmdBuffers) {
6275            auto cbNode = dev_data->commandBufferMap.find(cb);
6276            if (cbNode != dev_data->commandBufferMap.end()) {
6277                // Set CB as invalid and record destroyed framebuffer
6278                cbNode->second->state = CB_INVALID;
6279                loader_platform_thread_lock_mutex(&globalLock);
6280                cbNode->second->destroyedFramebuffers.insert(framebuffer);
6281                loader_platform_thread_unlock_mutex(&globalLock);
6282            }
6283        }
6284        loader_platform_thread_lock_mutex(&globalLock);
6285        dev_data->frameBufferMap.erase(framebuffer);
6286        loader_platform_thread_unlock_mutex(&globalLock);
6287    }
6288    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6289}
6290
6291VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6292vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6293    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6294    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6295    loader_platform_thread_lock_mutex(&globalLock);
6296    dev_data->renderPassMap.erase(renderPass);
6297    loader_platform_thread_unlock_mutex(&globalLock);
6298}
6299
6300VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6301                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6302    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6303
6304    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6305
6306    if (VK_SUCCESS == result) {
6307        loader_platform_thread_lock_mutex(&globalLock);
6308#if MTMERGESOURCE
6309        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
6310#endif
6311        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6312        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
6313        dev_data->bufferMap[*pBuffer].in_use.store(0);
6314        loader_platform_thread_unlock_mutex(&globalLock);
6315    }
6316    return result;
6317}
6318
6319VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6320                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6321    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6322    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6323    if (VK_SUCCESS == result) {
6324        loader_platform_thread_lock_mutex(&globalLock);
6325        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6326#if MTMERGESOURCE
6327        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6328        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6329        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
6330                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
6331                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6332#endif
6333        loader_platform_thread_unlock_mutex(&globalLock);
6334    }
6335    return result;
6336}
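
// Illustrative app-side setup for the usage-flag check above (hypothetical handles and
// format): the source buffer must have been created with a *_TEXEL_BUFFER usage bit.
#if 0 // example sketch only, not compiled
VkBufferCreateInfo buf_ci = {};
buf_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_ci.size = 4096;
buf_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // required before creating a buffer view
buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
vkCreateBuffer(device, &buf_ci, NULL, &buffer);

VkBufferViewCreateInfo view_ci = {};
view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
view_ci.buffer = buffer;
view_ci.format = VK_FORMAT_R32G32B32A32_SFLOAT;
view_ci.offset = 0;
view_ci.range = VK_WHOLE_SIZE;
VkBufferView view;
vkCreateBufferView(device, &view_ci, NULL, &view);
#endif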
6337
6338VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6339                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6340    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6341
6342    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6343
6344    if (VK_SUCCESS == result) {
6345        loader_platform_thread_lock_mutex(&globalLock);
6346#if MTMERGESOURCE
6347        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
6348#endif
6349        IMAGE_LAYOUT_NODE image_node;
6350        image_node.layout = pCreateInfo->initialLayout;
6351        image_node.format = pCreateInfo->format;
6352        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6353        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6354        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6355        dev_data->imageLayoutMap[subpair] = image_node;
6356        loader_platform_thread_unlock_mutex(&globalLock);
6357    }
6358    return result;
6359}
6360
6361static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6362    /* expects globalLock to be held by caller */
6363
6364    auto image_node_it = dev_data->imageMap.find(image);
6365    if (image_node_it != dev_data->imageMap.end()) {
6366        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6367         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6368         * the actual values.
6369         */
6370        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6371            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6372        }
6373
6374        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6375            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6376        }
6377    }
6378}
6379
6380// Return the correct layer/level counts if the caller used the special
6381// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6382static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6383                                         VkImage image) {
6384    /* expects globalLock to be held by caller */
6385
6386    *levels = range.levelCount;
6387    *layers = range.layerCount;
6388    auto image_node_it = dev_data->imageMap.find(image);
6389    if (image_node_it != dev_data->imageMap.end()) {
6390        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6391            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6392        }
6393        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6394            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6395        }
6396    }
6397}
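
// Worked example of the resolution above: for an image created with mipLevels = 10 and
// arrayLayers = 6, a range of { baseMipLevel = 3, levelCount = VK_REMAINING_MIP_LEVELS,
// baseArrayLayer = 2, layerCount = VK_REMAINING_ARRAY_LAYERS } resolves to
// levelCount = 10 - 3 = 7 and layerCount = 6 - 2 = 4.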
6398
6399VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6400                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6401    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6402    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6403    if (VK_SUCCESS == result) {
6404        loader_platform_thread_lock_mutex(&globalLock);
6405        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6406        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6407        dev_data->imageViewMap[*pView] = localCI;
6408#if MTMERGESOURCE
6409        // Validate that img has correct usage flags set
6410        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
6411                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6412                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6413                                   VK_FALSE, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
6414#endif
6415        loader_platform_thread_unlock_mutex(&globalLock);
6416    }
6417    return result;
6418}
6419
6420VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6421vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6422    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6423    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6424    if (VK_SUCCESS == result) {
6425        loader_platform_thread_lock_mutex(&globalLock);
6426        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
6427#if MTMERGESOURCE
6428        memset(pFN, 0, sizeof(MT_FENCE_INFO)); // Zero only the mem-tracker portion of the merged FENCE_NODE
6429        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
6430        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
6431            pFN->firstTimeFlag = VK_TRUE;
6432        }
6433#endif
6434        pFN->in_use.store(0);
6435        loader_platform_thread_unlock_mutex(&globalLock);
6436    }
6437    return result;
6438}
6439
6440// TODO handle pipeline caches
6441VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6442                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6443    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6444    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6445    return result;
6446}
6447
6448VKAPI_ATTR void VKAPI_CALL
6449vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6450    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6451    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6452}
6453
6454VKAPI_ATTR VkResult VKAPI_CALL
6455vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6456    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6457    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6458    return result;
6459}
6460
6461VKAPI_ATTR VkResult VKAPI_CALL
6462vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6463    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6464    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6465    return result;
6466}
6467
6468// utility function to set collective state for pipeline
6469void set_pipeline_state(PIPELINE_NODE *pPipe) {
6470    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6471    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6472        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6473            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6474                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6475                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6476                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6477                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6478                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6479                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6480                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6481                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6482                    pPipe->blendConstantsEnabled = true;
6483                }
6484            }
6485        }
6486    }
6487}
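
// Illustrative blend state (hypothetical values) that trips the detection above: an
// enabled attachment whose factors fall in the CONSTANT_COLOR..ONE_MINUS_CONSTANT_ALPHA
// range means the pipeline consumes blend constants, which the app must then supply.
#if 0 // example sketch only, not compiled
VkPipelineColorBlendAttachmentState att = {};
att.blendEnable = VK_TRUE;
att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; // constant-range factor -> blendConstantsEnabled
att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
att.colorBlendOp = VK_BLEND_OP_ADD;
att.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
att.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
att.alphaBlendOp = VK_BLEND_OP_ADD;
att.colorWriteMask = 0xf;
// At record time the constants must be provided, e.g. as dynamic state:
float constants[4] = {1.0f, 0.5f, 0.25f, 1.0f};
vkCmdSetBlendConstants(commandBuffer, constants);
#endif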
6488
6489VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6490vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6491                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6492                          VkPipeline *pPipelines) {
6493    VkResult result = VK_SUCCESS;
6494    // TODO What to do with pipelineCache?
6495    // The order of operations here is a little convoluted but gets the job done
6496    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6497    //  2. Create state is then validated (which uses flags setup during shadowing)
6498    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6499    VkBool32 skipCall = VK_FALSE;
6500    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6501    vector<PIPELINE_NODE *> pPipeNode(count);
6502    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6503
6504    uint32_t i = 0;
6505    loader_platform_thread_lock_mutex(&globalLock);
6506
6507    for (i = 0; i < count; i++) {
6508        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
6509        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6510    }
6511
6512    if (VK_FALSE == skipCall) {
6513        loader_platform_thread_unlock_mutex(&globalLock);
6514        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6515                                                                          pPipelines);
6516        loader_platform_thread_lock_mutex(&globalLock);
6517        for (i = 0; i < count; i++) {
6518            pPipeNode[i]->pipeline = pPipelines[i];
6519            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6520        }
6521        loader_platform_thread_unlock_mutex(&globalLock);
6522    } else {
6523        for (i = 0; i < count; i++) {
6524            delete pPipeNode[i];
6525        }
6526        loader_platform_thread_unlock_mutex(&globalLock);
6527        return VK_ERROR_VALIDATION_FAILED_EXT;
6528    }
6529    return result;
6530}
6531
6532VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6533vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6534                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6535                         VkPipeline *pPipelines) {
6536    VkResult result = VK_SUCCESS;
6537    VkBool32 skipCall = VK_FALSE;
6538
6539    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6540    vector<PIPELINE_NODE *> pPipeNode(count);
6541    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6542
6543    uint32_t i = 0;
6544    loader_platform_thread_lock_mutex(&globalLock);
6545    for (i = 0; i < count; i++) {
6546        // TODO: Verify compute stage bits
6547
6548        // Create and initialize internal tracking data structure
6549        pPipeNode[i] = new PIPELINE_NODE;
6550        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6551
6552        // TODO: Add Compute Pipeline Verification
6553        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6554    }
6555
6556    if (VK_FALSE == skipCall) {
6557        loader_platform_thread_unlock_mutex(&globalLock);
6558        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6559                                                                         pPipelines);
6560        loader_platform_thread_lock_mutex(&globalLock);
6561        for (i = 0; i < count; i++) {
6562            pPipeNode[i]->pipeline = pPipelines[i];
6563            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6564        }
6565        loader_platform_thread_unlock_mutex(&globalLock);
6566    } else {
6567        for (i = 0; i < count; i++) {
6568            // Clean up any locally allocated data structures
6569            delete pPipeNode[i];
6570        }
6571        loader_platform_thread_unlock_mutex(&globalLock);
6572        return VK_ERROR_VALIDATION_FAILED_EXT;
6573    }
6574    return result;
6575}
6576
6577VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6578                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6579    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6580    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6581    if (VK_SUCCESS == result) {
6582        loader_platform_thread_lock_mutex(&globalLock);
6583        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6584        loader_platform_thread_unlock_mutex(&globalLock);
6585    }
6586    return result;
6587}
6588
6589VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6590vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6591                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6592    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6593    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6594    if (VK_SUCCESS == result) {
6595        // TODOSC : Capture layout bindings set
6596        LAYOUT_NODE *pNewNode = new (std::nothrow) LAYOUT_NODE; // nothrow so the out-of-memory check below can actually fire
6597        if (NULL == pNewNode) {
6598            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
6599                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6600                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
6601                return VK_ERROR_VALIDATION_FAILED_EXT;
6602        }
6603        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
6604        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
6605        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
6606               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
6607        // g++ does not like reserve with size 0
6608        if (pCreateInfo->bindingCount)
6609            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
6610        uint32_t totalCount = 0;
6611        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6612            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
6613                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6614                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
6615                            DRAWSTATE_INVALID_LAYOUT, "DS", "duplicate binding number in "
6616                                                            "VkDescriptorSetLayoutBinding"))
6617                    return VK_ERROR_VALIDATION_FAILED_EXT;
6618            }
6621            totalCount += pCreateInfo->pBindings[i].descriptorCount;
6622            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
6623                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
6624                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
6625                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
6626                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
6627            }
6628        }
6629        pNewNode->layout = *pSetLayout;
6630        pNewNode->startIndex = 0;
6631        if (totalCount > 0) {
6632            pNewNode->descriptorTypes.resize(totalCount);
6633            pNewNode->stageFlags.resize(totalCount);
6634            uint32_t offset = 0;
6635            uint32_t j = 0;
6636            VkDescriptorType dType;
6637            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6638                dType = pCreateInfo->pBindings[i].descriptorType;
6639                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
6640                    pNewNode->descriptorTypes[offset + j] = dType;
6641                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
6642                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
6643                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
6644                        pNewNode->dynamicDescriptorCount++;
6645                    }
6646                }
6647                offset += j;
6648            }
6649            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
6650        } else { // no descriptors
6651            pNewNode->endIndex = 0;
6652        }
6653        // Add the new layout node to the descriptorSetLayoutMap
6654        loader_platform_thread_lock_mutex(&globalLock);
6655        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
6656        loader_platform_thread_unlock_mutex(&globalLock);
6657    }
6658    return result;
6659}
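
// Worked example of the index bookkeeping above (hypothetical layout): two bindings
// with descriptorCount 3 and 2 give totalCount = 5, so descriptorTypes/stageFlags hold
// entries [0..4] and endIndex = startIndex + totalCount - 1 = 4. Each descriptor in a
// *_DYNAMIC binding bumps dynamicDescriptorCount by one.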
6660
6661static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6662                                     const char *caller_name) {
6663    bool skipCall = false;
6664    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
6665        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6666                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6667                                                                 "exceeds this device's maxPushConstantsSize of %u.",
6668                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
6669    }
6670    return skipCall;
6671}
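
// Worked example for the limit check above: with a (hypothetical) maxPushConstantsSize
// of 128, a range with offset = 112 and size = 32 fails because 112 + 32 = 144 > 128,
// while offset = 96 and size = 32 passes (96 + 32 = 128 does not exceed the limit).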
6672
6673VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6674                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6675    bool skipCall = false;
6676    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6677    uint32_t i = 0;
6678    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6679        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6680                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
6681        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
6682            skipCall |=
6683                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6684                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
6685                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6686                        i, pCreateInfo->pPushConstantRanges[i].size);
6687        }
6688        // TODO : Add warning if ranges overlap
6689    }
6690    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6691    if (VK_SUCCESS == result) {
6692        loader_platform_thread_lock_mutex(&globalLock);
6693        // TODOSC : Merge capture of the setLayouts per pipeline
6694        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6695        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6696        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6697            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6698        }
6699        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6700        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6701            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6702        }
6703        loader_platform_thread_unlock_mutex(&globalLock);
6704    }
6705    return result;
6706}
6707
6708VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6709vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6710                       VkDescriptorPool *pDescriptorPool) {
6711    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6712    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6713    if (VK_SUCCESS == result) {
6714        // Log the new pool, then insert it into the descriptorPoolMap
6715        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6716                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
6717                    (uint64_t)*pDescriptorPool))
6718            return VK_ERROR_VALIDATION_FAILED_EXT;
6719        DESCRIPTOR_POOL_NODE *pNewNode = new (std::nothrow) DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo); // nothrow so the NULL check below can fire
6720        if (NULL == pNewNode) {
6721            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6722                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6723                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6724                return VK_ERROR_VALIDATION_FAILED_EXT;
6725        } else {
6726            loader_platform_thread_lock_mutex(&globalLock);
6727            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6728            loader_platform_thread_unlock_mutex(&globalLock);
6729        }
6730    } else {
6731        // Need to do anything if pool create fails?
6732    }
6733    return result;
6734}
6735
6736VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6737vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6738    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6739    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6740    if (VK_SUCCESS == result) {
6741        loader_platform_thread_lock_mutex(&globalLock);
6742        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6743        loader_platform_thread_unlock_mutex(&globalLock);
6744    }
6745    return result;
6746}
6747
6748VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6749vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6750    VkBool32 skipCall = VK_FALSE;
6751    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6752
6753    loader_platform_thread_lock_mutex(&globalLock);
6754    // Verify that requested descriptorSets are available in pool
6755    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6756    if (!pPoolNode) {
6757        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6758                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
6759                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
6760                            (uint64_t)pAllocateInfo->descriptorPool);
6761    } else { // Make sure pool has all the available descriptors before calling down chain
6762        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
6763                                                             pAllocateInfo->pSetLayouts);
6764    }
6765    loader_platform_thread_unlock_mutex(&globalLock);
6766    if (skipCall)
6767        return VK_ERROR_VALIDATION_FAILED_EXT;
6768    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6769    if (VK_SUCCESS == result) {
6770        loader_platform_thread_lock_mutex(&globalLock);
6771        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6772        if (pPoolNode) {
6773            if (pAllocateInfo->descriptorSetCount == 0) {
6774                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6775                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
6776                        "AllocateDescriptorSets called with 0 count");
6777            }
6778            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
6779                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6780                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
6781                        (uint64_t)pDescriptorSets[i]);
6782                // Create new set node and add to head of pool nodes
6783                SET_NODE *pNewNode = new (std::nothrow) SET_NODE; // nothrow so the out-of-memory check below can fire
6784                if (NULL == pNewNode) {
6785                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6786                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6787                                DRAWSTATE_OUT_OF_MEMORY, "DS",
6788                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
6789                        loader_platform_thread_unlock_mutex(&globalLock);
6790                        return VK_ERROR_VALIDATION_FAILED_EXT;
6791                    }
6792                } else {
6793                    // TODO : Pool should store a total count of each type of Descriptor available
6794                    //  When descriptors are allocated, decrement the count and validate here
6795                    //  that the count doesn't go below 0. On reset/free, the count needs to be bumped back up.
6796                    // Insert set at head of Set LL for this pool
6797                    pNewNode->pNext = pPoolNode->pSets;
6798                    pNewNode->in_use.store(0);
6799                    pPoolNode->pSets = pNewNode;
6800                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
6801                    if (NULL == pLayout) {
6802                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6803                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
6804                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
6805                                    "Unable to find set layout node for layout %#" PRIxLEAST64
6806                                    " specified in vkAllocateDescriptorSets() call",
6807                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
6808                            loader_platform_thread_unlock_mutex(&globalLock);
6809                            return VK_ERROR_VALIDATION_FAILED_EXT;
6810                        }
6811                    }
6812                    pNewNode->pLayout = pLayout;
6813                    pNewNode->pool = pAllocateInfo->descriptorPool;
6814                    pNewNode->set = pDescriptorSets[i];
6815                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
6816                    if (pNewNode->descriptorCount) {
6817                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
6818                    }
6819                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
6820                }
6821            }
6822        }
6823        loader_platform_thread_unlock_mutex(&globalLock);
6824    }
6825    return result;
6826}
6827
6828VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6829vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6830    VkBool32 skipCall = VK_FALSE;
6831    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6832    // Make sure that no sets being destroyed are in-flight
6833    loader_platform_thread_lock_mutex(&globalLock);
6834    for (uint32_t i = 0; i < count; ++i)
6835        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
6836    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6837    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6838        // Can't Free from a NON_FREE pool
6839        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6840                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6841                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6842                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6843    }
6844    loader_platform_thread_unlock_mutex(&globalLock);
6845    if (VK_FALSE != skipCall)
6846        return VK_ERROR_VALIDATION_FAILED_EXT;
6847    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6848    if ((VK_SUCCESS == result) && (NULL != pPoolNode)) { // Guard against an unknown pool to avoid a NULL dereference below
6849        loader_platform_thread_lock_mutex(&globalLock);
6850
6851        // Update available descriptor sets in pool
6852        pPoolNode->availableSets += count;
6853
6854        // For each freed descriptor add it back into the pool as available
6855        for (uint32_t i = 0; i < count; ++i) {
6856            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
6857            invalidateBoundCmdBuffers(dev_data, pSet);
6858            LAYOUT_NODE *pLayout = pSet->pLayout;
6859            uint32_t typeIndex = 0, poolSizeCount = 0;
6860            for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
6861                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
6862                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
6863                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
6864            }
6865        }
6866        loader_platform_thread_unlock_mutex(&globalLock);
6867    }
6868    // TODO : Any other clean-up or book-keeping to do here?
6869    return result;
6870}
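
// Illustrative pool creation (hypothetical sizes) for the flag check above: descriptor
// sets may only be individually freed from a pool created with the FREE bit.
#if 0 // example sketch only, not compiled
VkDescriptorPoolSize pool_size = {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 16};
VkDescriptorPoolCreateInfo pool_ci = {};
pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; // enables vkFreeDescriptorSets()
pool_ci.maxSets = 8;
pool_ci.poolSizeCount = 1;
pool_ci.pPoolSizes = &pool_size;
VkDescriptorPool pool;
vkCreateDescriptorPool(device, &pool_ci, NULL, &pool);
#endif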
6871
6872VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6873vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6874                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6875    // dsUpdate will return VK_TRUE only if a bailout error occurs, so we only call down the chain when update returns VK_FALSE
6876    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6877    loader_platform_thread_lock_mutex(&globalLock);
6878#if MTMERGESOURCE
6879    // MTMTODO : Merge this in with existing update code below and handle descriptor copies case
6880    uint32_t j = 0;
6881    for (uint32_t i = 0; i < descriptorWriteCount; ++i) {
6882        if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
6883            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6884                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].images.push_back(
6885                    pDescriptorWrites[i].pImageInfo[j].imageView);
6886            }
6887        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
6888            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6889                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6890                    dev_data->bufferViewMap[pDescriptorWrites[i].pTexelBufferView[j]].buffer);
6891            }
6892        } else if (pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
6893                   pDescriptorWrites[i].descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6894            for (j = 0; j < pDescriptorWrites[i].descriptorCount; ++j) {
6895                dev_data->descriptorSetMap[pDescriptorWrites[i].dstSet].buffers.push_back(
6896                    pDescriptorWrites[i].pBufferInfo[j].buffer);
6897            }
6898        }
6899    }
6900#endif
6901    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6902    loader_platform_thread_unlock_mutex(&globalLock);
6903    if (!rtn) {
6904        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6905                                                              pDescriptorCopies);
6906    }
6907}
6908
6909VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6910vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6911    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6912    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6913    if (VK_SUCCESS == result) {
6914        loader_platform_thread_lock_mutex(&globalLock);
6915        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6916        if (cp_it != dev_data->commandPoolMap.end()) {
6917            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6918                // Add command buffer to its commandPool map
6919                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6920                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6921                // Add command buffer to map
6922                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6923                resetCB(dev_data, pCommandBuffer[i]);
6924                pCB->createInfo = *pCreateInfo;
6925                pCB->device = device;
6926            }
6927        }
6928#if MTMERGESOURCE
6929        printCBList(dev_data, device);
6930#endif
6931        loader_platform_thread_unlock_mutex(&globalLock);
6932    }
6933    return result;
6934}
6935
6936VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6937vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6938    VkBool32 skipCall = VK_FALSE;
6939    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6940    loader_platform_thread_lock_mutex(&globalLock);
6941    // Validate command buffer level
6942    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6943    if (pCB) {
6944#if MTMERGESOURCE
6945        bool commandBufferComplete = false;
6946        // MTMTODO : Merge this with code below
6947        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6948        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
6949
6950        if (!commandBufferComplete) {
6951            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6952                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6953                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
6954                                "You must check CB flag before this call.",
6955                                commandBuffer);
6956        }
6957#endif
6958        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6959            // Secondary Command Buffer
6960            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6961            if (!pInfo) {
6962                skipCall |=
6963                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6964                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6965                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
6966                            reinterpret_cast<void *>(commandBuffer));
6967            } else {
6968                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6969                    if (!pInfo->renderPass) { // renderPass should NOT be null for a secondary CB
6970                        skipCall |= log_msg(
6971                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6972                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6973                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
6974                            reinterpret_cast<void *>(commandBuffer));
6975                    }
6976                    if (!pInfo->framebuffer) { // framebuffer may be null for a secondary CB, but this affects perf
6977                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6978                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6979                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6980                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
6981                                                  "valid framebuffer parameter is specified.",
6982                                            reinterpret_cast<void *>(commandBuffer));
6983                    } else {
6984                        string errorString = "";
6985                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
6986                        if (fbNode != dev_data->frameBufferMap.end()) {
6987                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
6988                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6989                                // renderPass that framebuffer was created with
6990                                // must be compatible with local renderPass
6992                                skipCall |=
6993                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6994                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6995                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6996                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6997                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
6998                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
6999                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
7000                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
7001                            }
7002                            // Connect this framebuffer to this cmdBuffer
7003                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
7004                        }
7005                    }
7006                }
7007                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
7008                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
7009                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7010                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7011                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7012                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7013                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
7014                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
7015                                        "support precise occlusion queries.",
7016                                        reinterpret_cast<void *>(commandBuffer));
7017                }
7018            }
7019            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
7020                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
7021                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
7022                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
7023                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7024                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7025                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7026                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must have a subpass index (%d) "
7027                                            "that is less than the number of subpasses (%d).",
7028                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
7029                    }
7030                }
7031            }
7032        }
7033        if (CB_RECORDING == pCB->state) {
7034            skipCall |=
7035                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7036                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7037                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
7038                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7039                        (uint64_t)commandBuffer);
7040        } else if (CB_RECORDED == pCB->state) {
7041            VkCommandPool cmdPool = pCB->createInfo.commandPool;
7042            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7043                skipCall |=
7044                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7045                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7046                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
7047                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
7048                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7049                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7050            }
7051            resetCB(dev_data, commandBuffer);
7052        }
7053        // Set updated state here in case implicit reset occurs above
7054        pCB->state = CB_RECORDING;
7055        pCB->beginInfo = *pBeginInfo;
7056        if (pCB->beginInfo.pInheritanceInfo) {
7057            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
7058            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
7059        }
7060    } else {
7061        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7062                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7063                            "In vkBeginCommandBuffer(): unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
7064    }
7065    loader_platform_thread_unlock_mutex(&globalLock);
7066    if (VK_FALSE != skipCall) {
7067        return VK_ERROR_VALIDATION_FAILED_EXT;
7068    }
7069    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
7070#if MTMERGESOURCE
7071    loader_platform_thread_lock_mutex(&globalLock);
7072    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7073    loader_platform_thread_unlock_mutex(&globalLock);
7074#endif
7075    return result;
7076}
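
// Illustrative begin of a secondary CB (hypothetical handles) satisfying the checks
// above: inheritance info is required, and with RENDER_PASS_CONTINUE_BIT set, a
// compatible renderPass (and ideally a framebuffer) should be provided.
#if 0 // example sketch only, not compiled
VkCommandBufferInheritanceInfo inherit = {};
inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
inherit.renderPass = renderPass;   // must be compatible with the framebuffer's render pass
inherit.subpass = 0;               // must be < the render pass's subpassCount
inherit.framebuffer = framebuffer; // optional, but specifying it may perform better

VkCommandBufferBeginInfo begin = {};
begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
begin.pInheritanceInfo = &inherit;
vkBeginCommandBuffer(secondaryCB, &begin);
#endif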
7077
7078VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7079    VkBool32 skipCall = VK_FALSE;
7080    VkResult result = VK_SUCCESS;
7081    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7082    loader_platform_thread_lock_mutex(&globalLock);
7083    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7084    if (pCB) {
7085        if (pCB->state != CB_RECORDING) {
7086            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7087        }
7088        for (auto query : pCB->activeQueries) {
7089            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7090                                DRAWSTATE_INVALID_QUERY, "DS",
7091                                "Ending command buffer with in-progress query: queryPool %" PRIu64 ", index %d",
7092                                (uint64_t)(query.pool), query.index);
7093        }
7094    }
7095    if (VK_FALSE == skipCall) {
7096        loader_platform_thread_unlock_mutex(&globalLock);
7097        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7098        loader_platform_thread_lock_mutex(&globalLock);
7099        if ((VK_SUCCESS == result) && (NULL != pCB)) { // pCB may be NULL if no node was found for this CB above
7100            pCB->state = CB_RECORDED;
7101            // Reset CB status flags
7102            pCB->status = 0;
7103            printCB(dev_data, commandBuffer);
7104        }
7105    } else {
7106        result = VK_ERROR_VALIDATION_FAILED_EXT;
7107    }
7108    loader_platform_thread_unlock_mutex(&globalLock);
7109    return result;
7110}
7111
7112VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7113vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7114    VkBool32 skipCall = VK_FALSE;
7115    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7116    loader_platform_thread_lock_mutex(&globalLock);
7117#if MTMERGESOURCE
7118    bool commandBufferComplete = false;
7119    // Verify that CB is complete (not in-flight)
7120    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7121    if (!commandBufferComplete) {
7122        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7123                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7124                            "Resetting CB %p before it has completed. You must check CB "
7125                            "flag before calling vkResetCommandBuffer().",
7126                            commandBuffer);
7127    }
7128    // Clear memory references at this point.
7129    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7130#endif
7131    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7132    if (pCB && !(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT &
7133                 dev_data->commandPoolMap[pCB->createInfo.commandPool].createFlags)) {
7134        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7135                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7136                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
7137                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7138                            (uint64_t)commandBuffer, (uint64_t)pCB->createInfo.commandPool);
7139    }
7140    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7141        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7142                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7143                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7144                            reinterpret_cast<uint64_t>(commandBuffer));
7145    }
7146    loader_platform_thread_unlock_mutex(&globalLock);
7147    if (skipCall != VK_FALSE)
7148        return VK_ERROR_VALIDATION_FAILED_EXT;
7149    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7150    if (VK_SUCCESS == result) {
7151        loader_platform_thread_lock_mutex(&globalLock);
7152        resetCB(dev_data, commandBuffer);
7153        loader_platform_thread_unlock_mutex(&globalLock);
7154    }
7155    return result;
7156}
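
// Illustrative application-side sketch of the two requirements validated above; 'device',
// 'queue_family_index', 'pool', and 'cmd_buf' are placeholder names assumed to be in scope.
//
//     VkCommandPoolCreateInfo pool_ci = {};
//     pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; // required for per-CB reset
//     pool_ci.queueFamilyIndex = queue_family_index;
//     vkCreateCommandPool(device, &pool_ci, NULL, &pool);
//     // ... allocate cmd_buf from pool, submit it, and wait on its fence so it is not in-flight ...
//     vkResetCommandBuffer(cmd_buf, 0); // passes both the pool-flag and in-flight checks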
7157#if MTMERGESOURCE
7158// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7159//    need to account for that mem now having binding to given commandBuffer
7160#endif
7161VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7162vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7163    VkBool32 skipCall = VK_FALSE;
7164    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7165    loader_platform_thread_lock_mutex(&globalLock);
7166    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7167    if (pCB) {
7168        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7169        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7170            skipCall |=
7171                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7172                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7173                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7174                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7175        }
7176
7177        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7178        if (pPN) {
7179            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7180            set_cb_pso_status(pCB, pPN);
7181            set_pipeline_state(pPN);
7182            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7183        } else {
7184            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7185                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7186                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7187        }
7188    }
7189    loader_platform_thread_unlock_mutex(&globalLock);
7190    if (VK_FALSE == skipCall)
7191        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7192}
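
// Illustrative application-side sketch of the render-pass restriction checked above; 'cmd'
// and 'compute_pipe' are placeholder handles.
//
//     vkCmdEndRenderPass(cmd); // a compute pipeline may not be bound inside an active render pass
//     vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipe);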
7193
7194VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7195vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7196    VkBool32 skipCall = VK_FALSE;
7197    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7198    loader_platform_thread_lock_mutex(&globalLock);
7199    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7200    if (pCB) {
7201        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7202        pCB->status |= CBSTATUS_VIEWPORT_SET;
7203        pCB->viewports.resize(viewportCount);
7204        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
7205    }
7206    loader_platform_thread_unlock_mutex(&globalLock);
7207    if (VK_FALSE == skipCall)
7208        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7209}
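
// Illustrative application-side sketch; 'cmd' is a placeholder handle. Setting the viewport
// records CBSTATUS_VIEWPORT_SET, which draw-time validation expects when the bound pipeline
// declares VK_DYNAMIC_STATE_VIEWPORT.
//
//     VkViewport vp = {0.0f, 0.0f, 640.0f, 480.0f, 0.0f, 1.0f}; // x, y, width, height, minDepth, maxDepth
//     vkCmdSetViewport(cmd, 0, 1, &vp);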
7210
7211VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7212vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7213    VkBool32 skipCall = VK_FALSE;
7214    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7215    loader_platform_thread_lock_mutex(&globalLock);
7216    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7217    if (pCB) {
7218        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7219        pCB->status |= CBSTATUS_SCISSOR_SET;
7220        pCB->scissors.resize(scissorCount);
7221        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
7222    }
7223    loader_platform_thread_unlock_mutex(&globalLock);
7224    if (VK_FALSE == skipCall)
7225        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7226}
7227
7228VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7229    VkBool32 skipCall = VK_FALSE;
7230    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7231    loader_platform_thread_lock_mutex(&globalLock);
7232    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7233    if (pCB) {
7234        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7235        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7236    }
7237    loader_platform_thread_unlock_mutex(&globalLock);
7238    if (VK_FALSE == skipCall)
7239        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7240}
7241
7242VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7243vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7244    VkBool32 skipCall = VK_FALSE;
7245    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7246    loader_platform_thread_lock_mutex(&globalLock);
7247    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7248    if (pCB) {
7249        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7250        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7251    }
7252    loader_platform_thread_unlock_mutex(&globalLock);
7253    if (VK_FALSE == skipCall)
7254        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7255                                                         depthBiasSlopeFactor);
7256}
7257
7258VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7259    VkBool32 skipCall = VK_FALSE;
7260    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7261    loader_platform_thread_lock_mutex(&globalLock);
7262    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7263    if (pCB) {
7264        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7265        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7266    }
7267    loader_platform_thread_unlock_mutex(&globalLock);
7268    if (VK_FALSE == skipCall)
7269        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7270}
7271
7272VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7273vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7274    VkBool32 skipCall = VK_FALSE;
7275    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7276    loader_platform_thread_lock_mutex(&globalLock);
7277    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7278    if (pCB) {
7279        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7280        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7281    }
7282    loader_platform_thread_unlock_mutex(&globalLock);
7283    if (VK_FALSE == skipCall)
7284        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7285}
7286
7287VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7288vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7289    VkBool32 skipCall = VK_FALSE;
7290    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7291    loader_platform_thread_lock_mutex(&globalLock);
7292    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7293    if (pCB) {
7294        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7295        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7296    }
7297    loader_platform_thread_unlock_mutex(&globalLock);
7298    if (VK_FALSE == skipCall)
7299        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7300}
7301
7302VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7303vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7304    VkBool32 skipCall = VK_FALSE;
7305    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7306    loader_platform_thread_lock_mutex(&globalLock);
7307    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7308    if (pCB) {
7309        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7310        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7311    }
7312    loader_platform_thread_unlock_mutex(&globalLock);
7313    if (VK_FALSE == skipCall)
7314        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7315}
7316
7317VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7318vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7319    VkBool32 skipCall = VK_FALSE;
7320    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7321    loader_platform_thread_lock_mutex(&globalLock);
7322    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7323    if (pCB) {
7324        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7325        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7326    }
7327    loader_platform_thread_unlock_mutex(&globalLock);
7328    if (VK_FALSE == skipCall)
7329        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7330}
7331
7332VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7333vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7334                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7335                        const uint32_t *pDynamicOffsets) {
7336    VkBool32 skipCall = VK_FALSE;
7337    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7338    loader_platform_thread_lock_mutex(&globalLock);
7339#if MTMERGESOURCE
7340    // MTMTODO : Merge this with code below
7341    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7342    if (cb_data != dev_data->commandBufferMap.end()) {
7343        // MTMTODO : activeDescriptorSets should be merged with lastBound.boundDescriptorSets
7344        std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7345        if (activeDescriptorSets.size() < (setCount + firstSet)) {
7346            activeDescriptorSets.resize(setCount + firstSet);
7347        }
7348        for (uint32_t i = 0; i < setCount; ++i) {
7349            activeDescriptorSets[i + firstSet] = pDescriptorSets[i];
7350        }
7351    }
7352    // TODO : Somewhere need to verify that all textures referenced by shaders in DS are in some type of *SHADER_READ* state
7353#endif
7354    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7355    if (pCB) {
7356        if (pCB->state == CB_RECORDING) {
7357            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7358            uint32_t totalDynamicDescriptors = 0;
7359            string errorString = "";
7360            uint32_t lastSetIndex = firstSet + setCount - 1;
7361            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7362                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7363            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7364            for (uint32_t i = 0; i < setCount; i++) {
7365                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7366                if (pSet) {
7367                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7368                    pSet->boundCmdBuffers.insert(commandBuffer);
7369                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7370                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7371                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7372                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7373                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7374                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7375                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7376                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7377                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7378                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7379                                            "DS %#" PRIxLEAST64
7380                                            " bound but it was never updated. You may want to either update it or not bind it.",
7381                                            (uint64_t)pDescriptorSets[i]);
7382                    }
7383                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7384                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7385                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7386                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7387                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7388                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
7389                                            "pipelineLayout due to: %s",
7390                                            i + firstSet, errorString.c_str());
7391                    }
7392                    if (pSet->pLayout->dynamicDescriptorCount) {
7393                        // First make sure we won't overstep bounds of pDynamicOffsets array
7394                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7395                            skipCall |=
7396                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7397                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7398                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7399                                        "descriptorSet #%u (%#" PRIxLEAST64
7400                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7401                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7402                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7403                                        (dynamicOffsetCount - totalDynamicDescriptors));
7404                        } else { // Validate and store dynamic offsets with the set
7405                            // Validate Dynamic Offset Minimums
7406                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7407                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7408                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7409                                    if (vk_safe_modulo(
7410                                            pDynamicOffsets[cur_dyn_offset],
7411                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7412                                        0) {
7413                                        skipCall |= log_msg(
7414                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7415                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7416                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7417                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7418                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7419                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7420                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7421                                    }
7422                                    cur_dyn_offset++;
7423                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7424                                    if (vk_safe_modulo(
7425                                            pDynamicOffsets[cur_dyn_offset],
7426                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7427                                        0) {
7428                                        skipCall |= log_msg(
7429                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7430                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7431                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7432                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7433                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7434                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7435                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7436                                    }
7437                                    cur_dyn_offset++;
7438                                }
7439                            }
7440                            // Keep running total of dynamic descriptor count to verify at the end
7441                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7442                        }
7443                    }
7444                } else {
7445                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7446                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7447                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7448                                        (uint64_t)pDescriptorSets[i]);
7449                }
7450            }
7451            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7452            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7453            if (firstSet > 0) { // Check set #s below the first bound set
7454                for (uint32_t i = 0; i < firstSet; ++i) {
7455                    if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7456                        !verify_set_layout_compatibility(
7457                            dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7458                            errorString)) {
7459                        skipCall |= log_msg(
7460                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7461                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7462                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7463                            "DescriptorSet %#" PRIxLEAST64
7464                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7465                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7466                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7467                    }
7468                }
7469            }
7470            // Check if newly last bound set invalidates any remaining bound sets
7471            if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7472                if (oldFinalBoundSet &&
7473                    !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7474                                                     errorString)) {
7475                    skipCall |=
7476                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7477                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7478                                DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
7479                                                      " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7480                                                      " newly bound as set #%u so set #%u and any subsequent sets were "
7481                                                      "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7482                                (uint64_t)oldFinalBoundSet, lastSetIndex,
7483                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7484                                lastSetIndex + 1, (uint64_t)layout);
7485                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7486                }
7487            }
7488            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7489            if (totalDynamicDescriptors != dynamicOffsetCount) {
7490                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7491                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7492                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7493                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7494                                    "is %u. It should exactly match the number of dynamic descriptors.",
7495                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7496            }
7497            // Save dynamicOffsets bound to this CB once, after all sets have been processed
7498            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7499                pCB->lastBound[pipelineBindPoint].dynamicOffsets.push_back(pDynamicOffsets[i]);
7500                pCB->dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7501            }
7514        } else {
7515            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7516        }
7517    }
7518    loader_platform_thread_unlock_mutex(&globalLock);
7519    if (VK_FALSE == skipCall)
7520        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7521                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7522}
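
// Illustrative application-side sketch of the dynamic-offset rules enforced above; 'cmd',
// 'pipe_layout', and 'set_with_one_dynamic_ubo' are placeholder handles.
//
//     uint32_t dynamic_offset = 256; // must be a multiple of minUniformBufferOffsetAlignment
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout,
//                             0, 1, &set_with_one_dynamic_ubo,
//                             1, &dynamic_offset); // one offset per dynamic descriptor being bound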
7523
7524VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7525vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7526    VkBool32 skipCall = VK_FALSE;
7527    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7528    loader_platform_thread_lock_mutex(&globalLock);
7529#if MTMERGESOURCE
7530    VkDeviceMemory mem;
7531    skipCall =
7532        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7533    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7534    if (cb_data != dev_data->commandBufferMap.end()) {
7535        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7536        cb_data->second->validate_functions.push_back(function);
7537    }
7538    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7539#endif
7540    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7541    if (pCB) {
7542        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7543        VkDeviceSize offset_align = 0;
7544        switch (indexType) {
7545        case VK_INDEX_TYPE_UINT16:
7546            offset_align = 2;
7547            break;
7548        case VK_INDEX_TYPE_UINT32:
7549            offset_align = 4;
7550            break;
7551        default:
7552            // ParamChecker should catch a bad enum; the alignment error below also fires if offset_align stays 0
7553            break;
7554        }
7555        if (!offset_align || (offset % offset_align)) {
7556            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7557                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7558                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7559                                offset, string_VkIndexType(indexType));
7560        }
7561        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7562    }
7563    loader_platform_thread_unlock_mutex(&globalLock);
7564    if (VK_FALSE == skipCall)
7565        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7566}
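
// Illustrative application-side sketch of the alignment rule above (offset must be a multiple
// of 2 for VK_INDEX_TYPE_UINT16 and of 4 for VK_INDEX_TYPE_UINT32); 'cmd' and 'index_buffer'
// are placeholder handles.
//
//     vkCmdBindIndexBuffer(cmd, index_buffer, 6, VK_INDEX_TYPE_UINT32); // 6 % 4 != 0, flags DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR
//     vkCmdBindIndexBuffer(cmd, index_buffer, 8, VK_INDEX_TYPE_UINT32); // aligned, passes the check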
7567
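// Record the buffers bound by vkCmdBindVertexBuffers() into the CB's current draw data so
// subsequent draws can be validated against them.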
7568void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7569    uint32_t end = firstBinding + bindingCount;
7570    if (pCB->currentDrawData.buffers.size() < end) {
7571        pCB->currentDrawData.buffers.resize(end);
7572    }
7573    for (uint32_t i = 0; i < bindingCount; ++i) {
7574        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7575    }
7576}
7577
7578void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7579
7580VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7581                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7582                                                                  const VkDeviceSize *pOffsets) {
7583    VkBool32 skipCall = VK_FALSE;
7584    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7585    loader_platform_thread_lock_mutex(&globalLock);
7586#if MTMERGESOURCE
7587    for (uint32_t i = 0; i < bindingCount; ++i) {
7588        VkDeviceMemory mem;
7589        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7590                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7591        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7592        if (cb_data != dev_data->commandBufferMap.end()) {
7593            std::function<VkBool32()> function =
7594                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7595            cb_data->second->validate_functions.push_back(function);
7596        }
7597    }
7598    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7599#endif
7600    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7601    if (pCB) {
7602        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7603        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7604    } else {
7605        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7606    }
7607    loader_platform_thread_unlock_mutex(&globalLock);
7608    if (VK_FALSE == skipCall)
7609        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7610}
7611
7612#if MTMERGESOURCE
7613/* expects globalLock to be held by caller */
7614bool markStoreImagesAndBuffersAsWritten(VkCommandBuffer commandBuffer) {
7615    bool skip_call = false;
7616    layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7617    auto cb_data = my_data->commandBufferMap.find(commandBuffer);
7618    if (cb_data == my_data->commandBufferMap.end())
7619        return skip_call;
7620    std::vector<VkDescriptorSet> &activeDescriptorSets = cb_data->second->activeDescriptorSets;
7621    for (auto descriptorSet : activeDescriptorSets) {
7622        auto ds_data = my_data->descriptorSetMap.find(descriptorSet);
7623        if (ds_data == my_data->descriptorSetMap.end())
7624            continue;
7625        std::vector<VkImageView> images = ds_data->second.images;
7626        std::vector<VkBuffer> buffers = ds_data->second.buffers;
7627        for (auto imageView : images) {
7628            auto iv_data = my_data->imageViewMap.find(imageView);
7629            if (iv_data == my_data->imageViewMap.end())
7630                continue;
7631            VkImage image = iv_data->second.image;
7632            VkDeviceMemory mem;
7633            skip_call |=
7634                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7635            std::function<VkBool32()> function = [=]() {
7636                set_memory_valid(my_data, mem, true, image);
7637                return VK_FALSE;
7638            };
7639            cb_data->second->validate_functions.push_back(function);
7640        }
7641        for (auto buffer : buffers) {
7642            VkDeviceMemory mem;
7643            skip_call |=
7644                get_mem_binding_from_object(my_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7645            std::function<VkBool32()> function = [=]() {
7646                set_memory_valid(my_data, mem, true);
7647                return VK_FALSE;
7648            };
7649            cb_data->second->validate_functions.push_back(function);
7650        }
7651    }
7652    return skip_call;
7653}
7654#endif
7655
7656VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7657                                                     uint32_t firstVertex, uint32_t firstInstance) {
7658    VkBool32 skipCall = VK_FALSE;
7659    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7660    loader_platform_thread_lock_mutex(&globalLock);
7661#if MTMERGESOURCE
7662    // MTMTODO : merge with code below
7663    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7664#endif
7665    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7666    if (pCB) {
7667        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7668        pCB->drawCount[DRAW]++;
7669        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7670        // TODO : Need to pass commandBuffer as srcObj here
7671        skipCall |=
7672            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7673                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7674        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7675        if (VK_FALSE == skipCall) {
7676            updateResourceTrackingOnDraw(pCB);
7677        }
7678        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7679    }
7680    loader_platform_thread_unlock_mutex(&globalLock);
7681    if (VK_FALSE == skipCall)
7682        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7683}
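
// Illustrative application-side sketch of the render-pass requirement checked above; 'cmd'
// and 'rp_begin' are placeholder values assumed to be set up elsewhere.
//
//     vkCmdBeginRenderPass(cmd, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdDraw(cmd, 3, 1, 0, 0); // inside the render pass, so outsideRenderPass() does not fire
//     vkCmdEndRenderPass(cmd);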
7684
7685VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7686                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7687                                                            uint32_t firstInstance) {
7688    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7689    VkBool32 skipCall = VK_FALSE;
7690    loader_platform_thread_lock_mutex(&globalLock);
7691#if MTMERGESOURCE
7692    // MTMTODO : merge with code below
7693    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7694#endif
7695    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7696    if (pCB) {
7697        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7698        pCB->drawCount[DRAW_INDEXED]++;
7699        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7700        // TODO : Need to pass commandBuffer as srcObj here
7701        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7702                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7703                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7704        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7705        if (VK_FALSE == skipCall) {
7706            updateResourceTrackingOnDraw(pCB);
7707        }
7708        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7709    }
7710    loader_platform_thread_unlock_mutex(&globalLock);
7711    if (VK_FALSE == skipCall)
7712        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7713                                                        firstInstance);
7714}
7715
7716VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7717vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7718    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7719    VkBool32 skipCall = VK_FALSE;
7720    loader_platform_thread_lock_mutex(&globalLock);
7721#if MTMERGESOURCE
7722    VkDeviceMemory mem;
7723    // MTMTODO : merge with code below
7724    skipCall =
7725        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7726    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7727    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7728#endif
7729    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7730    if (pCB) {
7731        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7732        pCB->drawCount[DRAW_INDIRECT]++;
7733        skipCall |= validate_draw_state(dev_data, pCB, VK_FALSE);
7734        // TODO : Need to pass commandBuffer as srcObj here
7735        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7736                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7737                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7738        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7739        if (VK_FALSE == skipCall) {
7740            updateResourceTrackingOnDraw(pCB);
7741        }
7742        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7743    }
7744    loader_platform_thread_unlock_mutex(&globalLock);
7745    if (VK_FALSE == skipCall)
7746        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7747}
7748
7749VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7750vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7751    VkBool32 skipCall = VK_FALSE;
7752    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7753    loader_platform_thread_lock_mutex(&globalLock);
7754#if MTMERGESOURCE
7755    VkDeviceMemory mem;
7756    // MTMTODO : merge with code below
7757    skipCall =
7758        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7759    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7760    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7761#endif
7762    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7763    if (pCB) {
7764        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7765        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7766        skipCall |= validate_draw_state(dev_data, pCB, VK_TRUE);
7767        // TODO : Need to pass commandBuffer as srcObj here
7768        skipCall |=
7769            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7770                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7771                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7772        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7773        if (VK_FALSE == skipCall) {
7774            updateResourceTrackingOnDraw(pCB);
7775        }
7776        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7777    }
7778    loader_platform_thread_unlock_mutex(&globalLock);
7779    if (VK_FALSE == skipCall)
7780        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7781}
7782
7783VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7784    VkBool32 skipCall = VK_FALSE;
7785    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7786    loader_platform_thread_lock_mutex(&globalLock);
7787#if MTMERGESOURCE
7788    skipCall = markStoreImagesAndBuffersAsWritten(commandBuffer);
7789#endif
7790    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7791    if (pCB) {
7792        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7793        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7794    }
7795    loader_platform_thread_unlock_mutex(&globalLock);
7796    if (VK_FALSE == skipCall)
7797        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7798}
7799
7800VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7801vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7802    VkBool32 skipCall = VK_FALSE;
7803    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7804    loader_platform_thread_lock_mutex(&globalLock);
7805#if MTMERGESOURCE
7806    VkDeviceMemory mem;
7807    skipCall =
7808        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7809    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7810    skipCall |= markStoreImagesAndBuffersAsWritten(commandBuffer);
7811#endif
7812    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7813    if (pCB) {
7814        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7815        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7816    }
7817    loader_platform_thread_unlock_mutex(&globalLock);
7818    if (VK_FALSE == skipCall)
7819        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7820}
7821
7822VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7823                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7824    VkBool32 skipCall = VK_FALSE;
7825    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7826    loader_platform_thread_lock_mutex(&globalLock);
7827#if MTMERGESOURCE
7828    VkDeviceMemory mem;
7829    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7830    skipCall =
7831        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7832    if (cb_data != dev_data->commandBufferMap.end()) {
7833        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7834        cb_data->second->validate_functions.push_back(function);
7835    }
7836    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7837    skipCall |=
7838        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7839    if (cb_data != dev_data->commandBufferMap.end()) {
7840        std::function<VkBool32()> function = [=]() {
7841            set_memory_valid(dev_data, mem, true);
7842            return VK_FALSE;
7843        };
7844        cb_data->second->validate_functions.push_back(function);
7845    }
7846    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7847    // Validate that SRC & DST buffers have correct usage flags set
7848    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7849                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7850    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7851                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7852#endif
7853    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7854    if (pCB) {
7855        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7856        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7857    }
7858    loader_platform_thread_unlock_mutex(&globalLock);
7859    if (VK_FALSE == skipCall)
7860        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7861}
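
// Illustrative application-side sketch of the usage-flag requirement above; 'device', 'size',
// and 'src_buffer' are placeholder names assumed to be in scope.
//
//     VkBufferCreateInfo buf_ci = {};
//     buf_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_ci.size = size;
//     buf_ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; // the dst buffer needs VK_BUFFER_USAGE_TRANSFER_DST_BIT
//     buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     vkCreateBuffer(device, &buf_ci, NULL, &src_buffer);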
7862
7863VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7864                                 VkImageLayout srcImageLayout) {
7865    VkBool32 skip_call = VK_FALSE;
7866
7867    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7868    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7869    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7870        uint32_t layer = i + subLayers.baseArrayLayer;
7871        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7872        IMAGE_CMD_BUF_LAYOUT_NODE node;
7873        if (!FindLayout(pCB, srcImage, sub, node)) {
7874            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7875            continue;
7876        }
7877        if (node.layout != srcImageLayout) {
7878            // TODO: Improve log message in the next pass
7879            skip_call |=
7880                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7881                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image with srcImageLayout %s "
7882                                                                        "that does not match the image's current layout %s.",
7883                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7884        }
7885    }
7886    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7887        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7888            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7889            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7890                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7891                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7892        } else {
7893            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7894                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7895                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7896                                 string_VkImageLayout(srcImageLayout));
7897        }
7898    }
7899    return skip_call;
7900}
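
// Illustrative application-side sketch of satisfying the source-layout check above; 'cmd' and
// 'image' are placeholder handles, and the old layout/access masks below are assumptions for
// a color image previously written in GENERAL layout.
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
//                          0, NULL, 0, NULL, 1, &barrier);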
7901
7902VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7903                               VkImageLayout destImageLayout) {
7904    VkBool32 skip_call = VK_FALSE;
7905
7906    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7907    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7908    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7909        uint32_t layer = i + subLayers.baseArrayLayer;
7910        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7911        IMAGE_CMD_BUF_LAYOUT_NODE node;
7912        if (!FindLayout(pCB, destImage, sub, node)) {
7913            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7914            continue;
7915        }
7916        if (node.layout != destImageLayout) {
7917            skip_call |=
7918                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7919                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image with dstImageLayout %s "
7920                                                                        "that does not match the image's current layout %s.",
7921                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7922        }
7923    }
7924    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7925        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7926            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7927            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7928                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7929                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7930        } else {
7931            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7932                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7933                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7934                                 string_VkImageLayout(destImageLayout));
7935        }
7936    }
7937    return skip_call;
7938}
7939
7940VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7941vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7942               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7943    VkBool32 skipCall = VK_FALSE;
7944    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7945    loader_platform_thread_lock_mutex(&globalLock);
7946#if MTMERGESOURCE
7947    VkDeviceMemory mem;
7948    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7949    // Validate that src & dst images have correct usage flags set
7950    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7951    if (cb_data != dev_data->commandBufferMap.end()) {
7952        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
7953        cb_data->second->validate_functions.push_back(function);
7954    }
7955    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7956    skipCall |=
7957        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7958    if (cb_data != dev_data->commandBufferMap.end()) {
7959        std::function<VkBool32()> function = [=]() {
7960            set_memory_valid(dev_data, mem, true, dstImage);
7961            return VK_FALSE;
7962        };
7963        cb_data->second->validate_functions.push_back(function);
7964    }
7965    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7966    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7967                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7968    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7969                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7970#endif
7971    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7972    if (pCB) {
7973        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
7974        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
7975        for (uint32_t i = 0; i < regionCount; ++i) {
7976            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7977            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7978        }
7979    }
7980    loader_platform_thread_unlock_mutex(&globalLock);
7981    if (VK_FALSE == skipCall)
7982        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7983                                                      regionCount, pRegions);
7984}
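
// Illustrative application-side sketch of the image usage-flag requirement above; 'image_ci'
// is a placeholder VkImageCreateInfo assumed to be filled in elsewhere.
//
//     image_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT; // source image
//     // ... the destination image additionally needs VK_IMAGE_USAGE_TRANSFER_DST_BIT ...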
7985
7986VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7987vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7988               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7989    VkBool32 skipCall = VK_FALSE;
7990    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7991    loader_platform_thread_lock_mutex(&globalLock);
7992#if MTMERGESOURCE
7993    VkDeviceMemory mem;
7994    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7995    // Validate that src & dst images have correct usage flags set
7996    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7997    if (cb_data != dev_data->commandBufferMap.end()) {
7998        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
7999        cb_data->second->validate_functions.push_back(function);
8000    }
8001    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
8002    skipCall |=
8003        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8004    if (cb_data != dev_data->commandBufferMap.end()) {
8005        std::function<VkBool32()> function = [=]() {
8006            set_memory_valid(dev_data, mem, true, dstImage);
8007            return VK_FALSE;
8008        };
8009        cb_data->second->validate_functions.push_back(function);
8010    }
8011    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
8012    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8013                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8014    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8015                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8016#endif
8017    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8018    if (pCB) {
8019        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
8020        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
8021    }
8022    loader_platform_thread_unlock_mutex(&globalLock);
8023    if (VK_FALSE == skipCall)
8024        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8025                                                      regionCount, pRegions, filter);
8026}
8027
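// vkCmdCopyBufferToImage is validated symmetrically: the source buffer needs
// VK_BUFFER_USAGE_TRANSFER_SRC_BIT, the destination image needs
// VK_IMAGE_USAGE_TRANSFER_DST_BIT, and every region's imageSubresource layout
// must match dstImageLayout. Hypothetical application-side sketch (cmdBuf,
// stagingBuffer, dstImage, width and height are not from this file):
//
//     VkBufferImageCopy region = {}; // zero bufferRowLength/bufferImageHeight = tightly packed
//     region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     region.imageExtent = {width, height, 1};
//     vkCmdCopyBufferToImage(cmdBuf, stagingBuffer, dstImage,
//                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);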
8028VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8029                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
8030                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8031    VkBool32 skipCall = VK_FALSE;
8032    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8033    loader_platform_thread_lock_mutex(&globalLock);
8034#if MTMERGESOURCE
8035    VkDeviceMemory mem;
8036    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8037    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8038    if (cb_data != dev_data->commandBufferMap.end()) {
8039        std::function<VkBool32()> function = [=]() {
8040            set_memory_valid(dev_data, mem, true, dstImage);
8041            return VK_FALSE;
8042        };
8043        cb_data->second->validate_functions.push_back(function);
8044    }
8045    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8046    skipCall |=
8047        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8048    if (cb_data != dev_data->commandBufferMap.end()) {
8049        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
8050        cb_data->second->validate_functions.push_back(function);
8051    }
8052    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8053    // Validate that src buff & dst image have correct usage flags set
8054    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8055                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8056    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8057                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8058#endif
8059    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8060    if (pCB) {
8061        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8062        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
8063        for (uint32_t i = 0; i < regionCount; ++i) {
8064            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8065        }
8066    }
8067    loader_platform_thread_unlock_mutex(&globalLock);
8068    if (VK_FALSE == skipCall)
8069        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
8070                                                              pRegions);
8071}
8072
8073VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8074                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8075                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8076    VkBool32 skipCall = VK_FALSE;
8077    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8078    loader_platform_thread_lock_mutex(&globalLock);
8079#if MTMERGESOURCE
8080    VkDeviceMemory mem;
8081    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8082    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8083    if (cb_data != dev_data->commandBufferMap.end()) {
8084        std::function<VkBool32()> function =
8085            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
8086        cb_data->second->validate_functions.push_back(function);
8087    }
8088    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8089    skipCall |=
8090        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8091    if (cb_data != dev_data->commandBufferMap.end()) {
8092        std::function<VkBool32()> function = [=]() {
8093            set_memory_valid(dev_data, mem, true);
8094            return VK_FALSE;
8095        };
8096        cb_data->second->validate_functions.push_back(function);
8097    }
8098    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8099    // Validate that dst buff & src image have correct usage flags set
8100    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8101                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8102    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8103                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8104#endif
8105    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8106    if (pCB) {
8107        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8108        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
8109        for (uint32_t i = 0; i < regionCount; ++i) {
8110            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8111        }
8112    }
8113    loader_platform_thread_unlock_mutex(&globalLock);
8114    if (VK_FALSE == skipCall)
8115        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
8116                                                              pRegions);
8117}
8118
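// vkCmdUpdateBuffer is an inline transfer: the core spec caps a single update
// at 65536 bytes, with dstOffset and dataSize both multiples of 4, and the
// destination buffer must carry VK_BUFFER_USAGE_TRANSFER_DST_BIT (the only one
// of those conditions this layer checks here). Hypothetical sketch:
//
//     const uint32_t values[4] = {0, 1, 2, 3};
//     vkCmdUpdateBuffer(cmdBuf, dstBuffer, 0 /*dstOffset*/, sizeof(values), values);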
8119VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8120                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8121    VkBool32 skipCall = VK_FALSE;
8122    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8123    loader_platform_thread_lock_mutex(&globalLock);
8124#if MTMERGESOURCE
8125    VkDeviceMemory mem;
8126    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8127    skipCall =
8128        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8129    if (cb_data != dev_data->commandBufferMap.end()) {
8130        std::function<VkBool32()> function = [=]() {
8131            set_memory_valid(dev_data, mem, true);
8132            return VK_FALSE;
8133        };
8134        cb_data->second->validate_functions.push_back(function);
8135    }
8136    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
8137    // Validate that dst buff has correct usage flags set
8138    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8139                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8140#endif
8141    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8142    if (pCB) {
8143        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8144        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
8145    }
8146    loader_platform_thread_unlock_mutex(&globalLock);
8147    if (VK_FALSE == skipCall)
8148        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8149}
8150
8151VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8152vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8153    VkBool32 skipCall = VK_FALSE;
8154    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8155    loader_platform_thread_lock_mutex(&globalLock);
8156#if MTMERGESOURCE
8157    VkDeviceMemory mem;
8158    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8159    skipCall =
8160        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8161    if (cb_data != dev_data->commandBufferMap.end()) {
8162        std::function<VkBool32()> function = [=]() {
8163            set_memory_valid(dev_data, mem, true);
8164            return VK_FALSE;
8165        };
8166        cb_data->second->validate_functions.push_back(function);
8167    }
8168    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
8169    // Validate that dst buff has correct usage flags set
8170    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8171                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8172#endif
8173    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8174    if (pCB) {
8175        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8176        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
8177    }
8178    loader_platform_thread_unlock_mutex(&globalLock);
8179    if (VK_FALSE == skipCall)
8180        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8181}
8182
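// vkCmdClearAttachments clears regions of attachments bound by the current
// render pass instance, so outsideRenderPass() below enforces that it is
// recorded inside a render pass, and a full-extent clear before any draw only
// rates a performance warning: clearing the whole attachment up front is
// normally cheaper via VK_ATTACHMENT_LOAD_OP_CLEAR. Hypothetical sketch of the
// recommended alternative (the format and layouts are illustrative):
//
//     VkAttachmentDescription color = {};
//     color.format = VK_FORMAT_B8G8R8A8_UNORM;
//     color.samples = VK_SAMPLE_COUNT_1_BIT;
//     color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;   // cleared at render pass begin
//     color.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     color.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     color.finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;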
8183VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8184                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
8185                                                                 const VkClearRect *pRects) {
8186    VkBool32 skipCall = VK_FALSE;
8187    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8188    loader_platform_thread_lock_mutex(&globalLock);
8189    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8190    if (pCB) {
8191        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8192        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8193        if (!hasDrawCmd(pCB) && (rectCount > 0) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8194            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8195            // TODO : commandBuffer should be srcObj
8196            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
8197            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
8198            // call CmdClearAttachments
8199            // Otherwise this seems more like a performance warning.
8200            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8201                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8202                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8203                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8204                                (uint64_t)(commandBuffer));
8205        }
8206        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
8207    }
8208
8209    // Validate that attachment is in reference list of active subpass
8210    if (pCB && pCB->activeRenderPass) {
8211        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
8212        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8213
8214        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8215            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8216            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8217                VkBool32 found = VK_FALSE;
8218                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
8219                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
8220                        found = VK_TRUE;
8221                        break;
8222                    }
8223                }
8224                if (VK_FALSE == found) {
8225                    skipCall |= log_msg(
8226                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8227                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8228                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
8229                        attachment->colorAttachment, pCB->activeSubpass);
8230                }
8231            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8232                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8233                    (pSD->pDepthStencilAttachment->attachment ==
8234                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8235
8236                    skipCall |= log_msg(
8237                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8238                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8239                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
8240                        "in active subpass %d",
8241                        attachment->colorAttachment,
8242                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
8243                        pCB->activeSubpass);
8244                }
8245            }
8246        }
8247    }
8248    loader_platform_thread_unlock_mutex(&globalLock);
8249    if (VK_FALSE == skipCall)
8250        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8251}
8252
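// In contrast to vkCmdClearAttachments, the two clear-image commands below
// write image memory directly and must be recorded *outside* a render pass
// (hence the insideRenderPass() checks); their deferred set_memory_valid
// callbacks mark the backing memory initialized once the clear executes.
// Hypothetical sketch (cmdBuf and image are illustrative handles):
//
//     VkClearColorValue clearColor = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdClearColorImage(cmdBuf, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
//                          &clearColor, 1, &range);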
8253VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8254                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
8255                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8256    VkBool32 skipCall = VK_FALSE;
8257    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8258    loader_platform_thread_lock_mutex(&globalLock);
8259#if MTMERGESOURCE
8260    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8261    VkDeviceMemory mem;
8262    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8263    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8264    if (cb_data != dev_data->commandBufferMap.end()) {
8265        std::function<VkBool32()> function = [=]() {
8266            set_memory_valid(dev_data, mem, true, image);
8267            return VK_FALSE;
8268        };
8269        cb_data->second->validate_functions.push_back(function);
8270    }
8271    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
8272#endif
8273    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8274    if (pCB) {
8275        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8276        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
8277    }
8278    loader_platform_thread_unlock_mutex(&globalLock);
8279    if (VK_FALSE == skipCall)
8280        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8281}
8282
8283VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8284vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8285                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8286                            const VkImageSubresourceRange *pRanges) {
8287    VkBool32 skipCall = VK_FALSE;
8288    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8289    loader_platform_thread_lock_mutex(&globalLock);
8290#if MTMERGESOURCE
8291    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8292    VkDeviceMemory mem;
8293    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8294    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8295    if (cb_data != dev_data->commandBufferMap.end()) {
8296        std::function<VkBool32()> function = [=]() {
8297            set_memory_valid(dev_data, mem, true, image);
8298            return VK_FALSE;
8299        };
8300        cb_data->second->validate_functions.push_back(function);
8301    }
8302    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
8303#endif
8304    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8305    if (pCB) {
8306        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8307        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
8308    }
8309    loader_platform_thread_unlock_mutex(&globalLock);
8310    if (VK_FALSE == skipCall)
8311        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
8312                                                                   pRanges);
8313}
8314
8315VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8316vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8317                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8318    VkBool32 skipCall = VK_FALSE;
8319    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8320    loader_platform_thread_lock_mutex(&globalLock);
8321#if MTMERGESOURCE
8322    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8323    VkDeviceMemory mem;
8324    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8325    if (cb_data != dev_data->commandBufferMap.end()) {
8326        std::function<VkBool32()> function =
8327            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
8328        cb_data->second->validate_functions.push_back(function);
8329    }
8330    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8331    skipCall |=
8332        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8333    if (cb_data != dev_data->commandBufferMap.end()) {
8334        std::function<VkBool32()> function = [=]() {
8335            set_memory_valid(dev_data, mem, true, dstImage);
8336            return VK_FALSE;
8337        };
8338        cb_data->second->validate_functions.push_back(function);
8339    }
8340    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8341#endif
8342    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8343    if (pCB) {
8344        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8345        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
8346    }
8347    loader_platform_thread_unlock_mutex(&globalLock);
8348    if (VK_FALSE == skipCall)
8349        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8350                                                         regionCount, pRegions);
8351}
8352
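// Event state cannot be fully validated at record time: a command buffer may be
// submitted to any queue, any number of times. setEventStageMask is therefore
// bound into a std::function (with the VkQueue left as placeholder _1) and
// stored in pCB->eventUpdates; at vkQueueSubmit each stored closure runs
// against the actual queue so its eventToStageMap reflects the commands in
// true submission order.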
8353bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8354    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8355    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8356    if (pCB) {
8357        pCB->eventToStageMap[event] = stageMask;
8358    }
8359    auto queue_data = dev_data->queueMap.find(queue);
8360    if (queue_data != dev_data->queueMap.end()) {
8361        queue_data->second.eventToStageMap[event] = stageMask;
8362    }
8363    return false;
8364}
8365
8366VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8367vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8368    VkBool32 skipCall = VK_FALSE;
8369    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8370    loader_platform_thread_lock_mutex(&globalLock);
8371    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8372    if (pCB) {
8373        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8374        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
8375        pCB->events.push_back(event);
8376        std::function<bool(VkQueue)> eventUpdate =
8377            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8378        pCB->eventUpdates.push_back(eventUpdate);
8379    }
8380    loader_platform_thread_unlock_mutex(&globalLock);
8381    if (VK_FALSE == skipCall)
8382        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
8383}
8384
8385VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8386vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8387    VkBool32 skipCall = VK_FALSE;
8388    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8389    loader_platform_thread_lock_mutex(&globalLock);
8390    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8391    if (pCB) {
8392        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8393        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8394        pCB->events.push_back(event);
8395        std::function<bool(VkQueue)> eventUpdate =
8396            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8397        pCB->eventUpdates.push_back(eventUpdate);
8398    }
8399    loader_platform_thread_unlock_mutex(&globalLock);
8400    if (VK_FALSE == skipCall)
8401        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8402}
8403
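// TransitionImageLayouts walks every (mip level, array layer) pair named by
// each barrier's subresourceRange and updates the per-command-buffer layout
// map: the first time a subresource is seen, its (oldLayout, newLayout) pair
// is recorded as-is; afterwards, a barrier whose oldLayout disagrees with the
// tracked layout is an error, except oldLayout == UNDEFINED, which legally
// discards the contents. Hypothetical sketch of a barrier this tracks:
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = 0;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;                                  // hypothetical handle
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};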
8404VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
8405    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8406    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8407    VkBool32 skip = VK_FALSE;
8408    uint32_t levelCount = 0;
8409    uint32_t layerCount = 0;
8410
8411    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8412        auto mem_barrier = &pImgMemBarriers[i];
8413        if (!mem_barrier)
8414            continue;
8415        // TODO: Do not iterate over every possibility - consolidate where
8416        // possible
8417        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8418
8419        for (uint32_t j = 0; j < levelCount; j++) {
8420            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8421            for (uint32_t k = 0; k < layerCount; k++) {
8422                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8423                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8424                IMAGE_CMD_BUF_LAYOUT_NODE node;
8425                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8426                    SetLayout(pCB, mem_barrier->image, sub,
8427                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8428                    continue;
8429                }
8430                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8431                    // TODO: Set memory invalid which is in mem_tracker currently
8432                } else if (node.layout != mem_barrier->oldLayout) {
8433                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8434                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8435                                                                                    "when current layout is %s.",
8436                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8437                }
8438                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8439            }
8440        }
8441    }
8442    return skip;
8443}
8444
8445// Print readable FlagBits in FlagMask
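// e.g. string_VkAccessFlags(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT)
// yields "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]", and a
// mask of 0 yields "[None]".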
8446std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8447    std::string result;
8448    std::string separator;
8449
8450    if (accessMask == 0) {
8451        result = "[None]";
8452    } else {
8453        result = "[";
8454        for (uint32_t i = 0; i < 32; i++) {
8455            if (accessMask & (1u << i)) {
8456                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8457                separator = " | ";
8458            }
8459        }
8460        result = result + "]";
8461    }
8462    return result;
8463}
8464
8465// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8466// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8467// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8468VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8469                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
8470    VkBool32 skip_call = VK_FALSE;
8471
8472    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8473        if (accessMask & ~(required_bit | optional_bits)) {
8474            // TODO: Verify against Valid Use
8475            skip_call |=
8476                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8477                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8478                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8479        }
8480    } else {
8481        if (!required_bit) {
8482            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8483                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8484                                                                  "%s when layout is %s, unless the app has previously added a "
8485                                                                  "barrier for this transition.",
8486                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8487                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8488        } else {
8489            std::string opt_bits;
8490            if (optional_bits != 0) {
8491                std::stringstream ss;
8492                ss << optional_bits;
8493                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8494            }
8495            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8496                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8497                                                                  "layout is %s, unless the app has previously added a barrier for "
8498                                                                  "this transition.",
8499                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8500                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8501        }
8502    }
8503    return skip_call;
8504}
8505
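// ValidateMaskBitsFromLayouts maps each layout to the access bits it implies:
// e.g. a transition to VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL requires
// VK_ACCESS_TRANSFER_WRITE_BIT in the corresponding access mask, while the
// read-only layouts merely require one of the listed read bits.
// VK_IMAGE_LAYOUT_GENERAL is deliberately left unconstrained.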
8506VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8507                                     const VkImageLayout &layout, const char *type) {
8508    VkBool32 skip_call = VK_FALSE;
8509    switch (layout) {
8510    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8511        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8512                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8513        break;
8514    }
8515    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8516        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8517                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8518        break;
8519    }
8520    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8521        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8522        break;
8523    }
8524    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8525        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8526        break;
8527    }
8528    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8529        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8530                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8531        break;
8532    }
8533    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8534        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8535                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8536        break;
8537    }
8538    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8539        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8540        break;
8541    }
8542    case VK_IMAGE_LAYOUT_UNDEFINED: {
8543        if (accessMask != 0) {
8544            // TODO: Verify against Valid Use section spec
8545            skip_call |=
8546                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8547                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8548                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8549        }
8550        break;
8551    }
8552    case VK_IMAGE_LAYOUT_GENERAL:
8553    default: { break; }
8554    }
8555    return skip_call;
8556}
8557
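// Queue family ownership rules enforced in ValidateBarriers: images created
// with VK_SHARING_MODE_CONCURRENT must use VK_QUEUE_FAMILY_IGNORED for both
// indices, while VK_SHARING_MODE_EXCLUSIVE images must either use
// VK_QUEUE_FAMILY_IGNORED for both (no ownership transfer) or name two valid
// families. Hypothetical sketch of an exclusive-mode transfer, recorded once
// on the releasing queue and once on the acquiring queue (the family indices
// are illustrative):
//
//     barrier.srcQueueFamilyIndex = graphicsFamilyIndex;
//     barrier.dstQueueFamilyIndex = transferFamilyIndex;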
8558VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8559                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8560                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8561                          const VkImageMemoryBarrier *pImageMemBarriers) {
8562    VkBool32 skip_call = VK_FALSE;
8563    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8564    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8565    if (pCB->activeRenderPass && memBarrierCount) {
8566        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
8567            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8568                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8569                                                                  "with no self dependency specified.",
8570                                 funcName, pCB->activeSubpass);
8571        }
8572    }
8573    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8574        auto mem_barrier = &pImageMemBarriers[i];
8575        auto image_data = dev_data->imageMap.find(mem_barrier->image);
8576        if (image_data != dev_data->imageMap.end()) {
8577            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8578            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8579            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8580                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8581                // be VK_QUEUE_FAMILY_IGNORED
8582                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8583                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8584                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8585                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8586                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8587                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8588                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8589                }
8590            } else {
8591                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8592                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8593                // or both be a valid queue family
8594                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8595                    (src_q_f_index != dst_q_f_index)) {
8596                    skip_call |=
8597                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8598                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8599                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8600                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8601                                                                     "must be.",
8602                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8603                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8604                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
8605                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
8606                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8607                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8608                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8609                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8610                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8611                                         "queueFamilies crated for this device.",
8612                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8613                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
8614                }
8615            }
8616        }
8617
8618        if (mem_barrier) {
8619            skip_call |=
8620                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8621            skip_call |=
8622                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8623            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8624                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8625                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8626                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
8627                                     funcName);
8628            }
8629            auto image_data = dev_data->imageMap.find(mem_barrier->image);
8630            VkFormat format;
8631            uint32_t arrayLayers, mipLevels;
8632            bool imageFound = false;
8633            if (image_data != dev_data->imageMap.end()) {
8634                format = image_data->second.createInfo.format;
8635                arrayLayers = image_data->second.createInfo.arrayLayers;
8636                mipLevels = image_data->second.createInfo.mipLevels;
8637                imageFound = true;
8638            } else if (dev_data->device_extensions.wsi_enabled) {
8639                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
8640                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
8641                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
8642                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
8643                        format = swapchain_data->second->createInfo.imageFormat;
8644                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
8645                        mipLevels = 1;
8646                        imageFound = true;
8647                    }
8648                }
8649            }
8650            if (imageFound) {
8651                if (vk_format_is_depth_and_stencil(format) &&
8652                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
8653                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
8654                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8655                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8656                                         "%s: Image is a depth and stencil format and thus must have both "
8657                                         "VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
8658                                         funcName);
8659                }
8660                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8661                                     ? 1
8662                                     : mem_barrier->subresourceRange.layerCount;
8663                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8664                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8665                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
8666                                                             "baseArrayLayer (%d) and layerCount (%d) be less "
8667                                                             "than or equal to the total number of layers (%d).",
8668                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
8669                            arrayLayers);
8670                }
8671                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8672                                     ? 1
8673                                     : mem_barrier->subresourceRange.levelCount;
8674                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8675                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8676                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8677                                         "%s: Subresource range baseMipLevel (%d) plus levelCount (%d) must be less than "
8678                                         "or equal to the image's total number of levels (%d).",
8679                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
8680                                         mem_barrier->subresourceRange.levelCount, mipLevels);
8681                }
8682            }
8683        }
8684    }
8685    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8686        auto mem_barrier = &pBufferMemBarriers[i];
8687        if (pCB->activeRenderPass) {
8688            skip_call |=
8689                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8690                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8691        }
8692        if (!mem_barrier)
8693            continue;
8694
8695        // Validate buffer barrier queue family indices
8696        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8697             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
8698            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8699             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
8700            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8701                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8702                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8703                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8704                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8705                                 dev_data->physDevProperties.queue_family_properties.size());
8706        }
8707
8708        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
8709        if (buffer_data != dev_data->bufferMap.end()) {
8710            uint64_t buffer_size =
8711                buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
8712            if (mem_barrier->offset >= buffer_size) {
8713                skip_call |=
8714                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8715                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
8716                                                             " whose sum is not less than total size %" PRIu64 ".",
8717                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8718                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
8719            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8720                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8721                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8722                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
8723                                     " whose sum is greater than total size %" PRIu64 ".",
8724                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8725                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8726                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
8727            }
8728        }
8729    }
8730    return skip_call;
8731}
8732
8733bool validateEventStageMask(VkQueue queue, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask) {
8734    bool skip_call = false;
8735    VkPipelineStageFlags stageMask = 0;
8736    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8737    auto queue_data = dev_data->queueMap.find(queue);
8738    for (uint32_t i = 0; i < eventCount; ++i) {
8739        if (queue_data == dev_data->queueMap.end())
8740            return false;
8741        auto event_data = queue_data->second.eventToStageMap.find(pEvents[i]);
8742        if (event_data != queue_data->second.eventToStageMap.end()) {
8743            stageMask |= event_data->second;
8744        } else {
8745            auto global_event_data = dev_data->eventMap.find(pEvents[i]);
8746            if (global_event_data == dev_data->eventMap.end()) {
8747                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8748                                     reinterpret_cast<const uint64_t &>(pEvents[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
8749                                     "Fence 0x%" PRIx64 " cannot be waited on if it has never been set.",
8750                                     reinterpret_cast<const uint64_t &>(pEvents[i]));
8751            } else {
8752                stageMask |= global_event_data->second.stageMask;
8753            }
8754        }
8755    }
8756    if (sourceStageMask != stageMask) {
8757        skip_call |=
8758            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8759                    DRAWSTATE_INVALID_FENCE, "DS",
8760                    "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
8761                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
8762                    sourceStageMask);
8763    }
8764    return skip_call;
8765}
8766
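// validateEventStageMask above is deferred to submit time (it is bound into
// pCB->eventUpdates by vkCmdWaitEvents below) and enforces that srcStageMask
// equals the bitwise OR of the stageMasks the awaited events were set with.
// Hypothetical sketch of a matching pair (cmdBuf, evtA, evtB are illustrative):
//
//     vkCmdSetEvent(cmdBuf, evtA, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdSetEvent(cmdBuf, evtB, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
//     VkEvent evts[2] = {evtA, evtB};
//     vkCmdWaitEvents(cmdBuf, 2, evts,
//                     VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//                     0, NULL, 0, NULL, 0, NULL);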
8767VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8768vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8769                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8770                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8771                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8772    VkBool32 skipCall = VK_FALSE;
8773    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8774    loader_platform_thread_lock_mutex(&globalLock);
8775    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8776    if (pCB) {
8777        for (uint32_t i = 0; i < eventCount; ++i) {
8778            pCB->waitedEvents.push_back(pEvents[i]);
8779            pCB->events.push_back(pEvents[i]);
8780        }
8781        std::function<bool(VkQueue)> eventUpdate =
8782            std::bind(validateEventStageMask, std::placeholders::_1, eventCount, pEvents, sourceStageMask);
8783        pCB->eventUpdates.push_back(eventUpdate);
8784        if (pCB->state == CB_RECORDING) {
8785            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8786        } else {
8787            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8788        }
8789        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8790        skipCall |=
8791            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8792                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8793    }
8794    loader_platform_thread_unlock_mutex(&globalLock);
8795    if (VK_FALSE == skipCall)
8796        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8797                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8798                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8799}
8800
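// vkCmdPipelineBarrier shares the layout-transition and barrier checks with
// vkCmdWaitEvents but needs no event bookkeeping: the dependency is expressed
// purely through the stage masks, so only addCmd, TransitionImageLayouts and
// ValidateBarriers run here.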
8801VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8802vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8803                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8804                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8805                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8806    VkBool32 skipCall = VK_FALSE;
8807    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8808    loader_platform_thread_lock_mutex(&globalLock);
8809    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8810    if (pCB) {
8811        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8812        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8813        skipCall |=
8814            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8815                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8816    }
8817    loader_platform_thread_unlock_mutex(&globalLock);
8818    if (VK_FALSE == skipCall)
8819        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8820                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8821                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8822}
8823
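// The query intercepts below track per-CB query state: vkCmdBeginQuery adds to
// activeQueries/startedQueries, vkCmdEndQuery requires the query to be active
// and marks it available in queryToStateMap, vkCmdResetQueryPool clears that
// availability, and vkCmdCopyQueryPoolResults flags copies from queries that
// were never made available. Hypothetical sketch of a valid sequence (pool and
// resultBuf are illustrative handles):
//
//     vkCmdResetQueryPool(cmdBuf, pool, 0, 1);   // must be outside a render pass
//     vkCmdBeginQuery(cmdBuf, pool, 0, 0);
//     // ... draws ...
//     vkCmdEndQuery(cmdBuf, pool, 0);
//     vkCmdCopyQueryPoolResults(cmdBuf, pool, 0, 1, resultBuf, 0, sizeof(uint64_t),
//                               VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);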
8824VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8825vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8826    VkBool32 skipCall = VK_FALSE;
8827    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8828    loader_platform_thread_lock_mutex(&globalLock);
8829    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8830    if (pCB) {
8831        QueryObject query = {queryPool, slot};
8832        pCB->activeQueries.insert(query);
8833        if (!pCB->startedQueries.count(query)) {
8834            pCB->startedQueries.insert(query);
8835        }
8836        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8837    }
8838    loader_platform_thread_unlock_mutex(&globalLock);
8839    if (VK_FALSE == skipCall)
8840        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8841}
8842
8843VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8844    VkBool32 skipCall = VK_FALSE;
8845    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8846    loader_platform_thread_lock_mutex(&globalLock);
8847    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8848    if (pCB) {
8849        QueryObject query = {queryPool, slot};
8850        if (!pCB->activeQueries.count(query)) {
8851            skipCall |=
8852                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8853                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8854                        (uint64_t)(queryPool), slot);
8855        } else {
8856            pCB->activeQueries.erase(query);
8857        }
8858        pCB->queryToStateMap[query] = 1;
8859        if (pCB->state == CB_RECORDING) {
8860            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8861        } else {
8862            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8863        }
8864    }
8865    loader_platform_thread_unlock_mutex(&globalLock);
8866    if (VK_FALSE == skipCall)
8867        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8868}
8869
8870VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8871vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8872    VkBool32 skipCall = VK_FALSE;
8873    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8874    loader_platform_thread_lock_mutex(&globalLock);
8875    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8876    if (pCB) {
8877        for (uint32_t i = 0; i < queryCount; i++) {
8878            QueryObject query = {queryPool, firstQuery + i};
8879            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8880            pCB->queryToStateMap[query] = 0;
8881        }
8882        if (pCB->state == CB_RECORDING) {
8883            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8884        } else {
8885            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8886        }
8887        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8888    }
8889    loader_platform_thread_unlock_mutex(&globalLock);
8890    if (VK_FALSE == skipCall)
8891        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8892}
8893
8894VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8895vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8896                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8897    VkBool32 skipCall = VK_FALSE;
8898    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8899    loader_platform_thread_lock_mutex(&globalLock);
8900    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8901#if MTMERGESOURCE
8902    VkDeviceMemory mem;
8903    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8904    skipCall |=
8905        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8906    if (cb_data != dev_data->commandBufferMap.end()) {
8907        std::function<VkBool32()> function = [=]() {
8908            set_memory_valid(dev_data, mem, true);
8909            return VK_FALSE;
8910        };
8911        cb_data->second->validate_functions.push_back(function);
8912    }
8913    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8914    // Validate that DST buffer has correct usage flags set
8915    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8916                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8917#endif
8918    if (pCB) {
8919        for (uint32_t i = 0; i < queryCount; i++) {
8920            QueryObject query = {queryPool, firstQuery + i};
8921            if (!pCB->queryToStateMap[query]) {
8922                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8923                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8924                                    "Requesting a copy from query pool to buffer for an invalid query: queryPool %" PRIu64 ", index %d",
8925                                    (uint64_t)(queryPool), firstQuery + i);
8926            }
8927        }
8928        if (pCB->state == CB_RECORDING) {
8929            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8930        } else {
8931            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8932        }
8933        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8934    }
8935    loader_platform_thread_unlock_mutex(&globalLock);
8936    if (VK_FALSE == skipCall)
8937        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8938                                                                 dstOffset, stride, flags);
8939}
8940
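// Illustrative sketch (not compiled into the layer): a destination buffer for
// vkCmdCopyQueryPoolResults must be created with TRANSFER_DST usage or the
// usage-flag check above fires. The size shown is hypothetical.
#if 0
static VkBufferCreateInfo exampleQueryResultBufferCI() {
    VkBufferCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    ci.size = 8 * sizeof(uint64_t);              // room for 8 results with VK_QUERY_RESULT_64_BIT
    ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; // required by validate_buffer_usage_flags above
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return ci;
}
#endif
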
8941VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8942                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8943                                                              const void *pValues) {
8944    bool skipCall = false;
8945    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8946    loader_platform_thread_lock_mutex(&globalLock);
8947    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8948    if (pCB) {
8949        if (pCB->state == CB_RECORDING) {
8950            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8951        } else {
8952            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8953        }
8954    }
8955    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
8956        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
8957    }
8958    // TODO : Add warning if push constant update doesn't align with range
8959    loader_platform_thread_unlock_mutex(&globalLock);
8960    if (!skipCall)
8961        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8962}
8963
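// Illustrative sketch (not compiled into the layer): a push constant update that stays
// within maxPushConstantsSize and matches a VkPushConstantRange declared in the pipeline
// layout. The layout handle and the 16-byte fragment-stage range are hypothetical.
#if 0
static void examplePushConstants(VkCommandBuffer cmd, VkPipelineLayout layout) {
    const float color[4] = {1.0f, 0.0f, 0.0f, 1.0f};
    // offset 0 / size 16 must fall inside a range that 'layout' declares for this stage
    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_FRAGMENT_BIT, 0, sizeof(color), color);
}
#endif
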
8964VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8965vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8966    VkBool32 skipCall = VK_FALSE;
8967    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8968    loader_platform_thread_lock_mutex(&globalLock);
8969    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8970    if (pCB) {
8971        QueryObject query = {queryPool, slot};
8972        pCB->queryToStateMap[query] = 1;
8973        if (pCB->state == CB_RECORDING) {
8974            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8975        } else {
8976            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8977        }
8978    }
8979    loader_platform_thread_unlock_mutex(&globalLock);
8980    if (VK_FALSE == skipCall)
8981        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8982}
8983
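// Illustrative sketch (not compiled into the layer): bracketing GPU work with a pair of
// timestamps. The slots and stages are hypothetical, and the pool is assumed to have
// been created with VK_QUERY_TYPE_TIMESTAMP.
#if 0
static void exampleTimestampPair(VkCommandBuffer cmd, VkQueryPool tsPool) {
    vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, tsPool, 0);
    // ... work being timed ...
    vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, tsPool, 1);
}
#endif
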
8984VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8985                                                                   const VkAllocationCallbacks *pAllocator,
8986                                                                   VkFramebuffer *pFramebuffer) {
8987    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8988    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8989    if (VK_SUCCESS == result) {
8990        // Shadow create info and store in map
8991        VkFramebufferCreateInfo *localFBCI = new VkFramebufferCreateInfo(*pCreateInfo);
8992        if (pCreateInfo->pAttachments) {
8993            localFBCI->pAttachments = new VkImageView[localFBCI->attachmentCount];
8994            memcpy((void *)localFBCI->pAttachments, pCreateInfo->pAttachments, localFBCI->attachmentCount * sizeof(VkImageView));
8995        }
8996        FRAMEBUFFER_NODE fbNode = {};
8997        fbNode.createInfo = *localFBCI;
8998        std::pair<VkFramebuffer, FRAMEBUFFER_NODE> fbPair(*pFramebuffer, fbNode);
8999        loader_platform_thread_lock_mutex(&globalLock);
9000        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9001            VkImageView view = pCreateInfo->pAttachments[i];
9002            auto view_data = dev_data->imageViewMap.find(view);
9003            if (view_data == dev_data->imageViewMap.end()) {
9004                continue;
9005            }
9006            MT_FB_ATTACHMENT_INFO fb_info;
9007            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9008                                        &fb_info.mem);
9009            fb_info.image = view_data->second.image;
9010            fbPair.second.attachments.push_back(fb_info);
9011        }
9012        dev_data->frameBufferMap.insert(fbPair);
9013        loader_platform_thread_unlock_mutex(&globalLock);
9014    }
9015    return result;
9016}
9017
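// Illustrative sketch (not compiled into the layer): the create info that gets shadowed
// above. Its attachmentCount must match the render pass, which the begin-time check in
// VerifyFramebufferAndRenderPassLayouts below enforces. The extents are hypothetical.
#if 0
static VkFramebufferCreateInfo exampleFramebufferCI(VkRenderPass renderPass, const VkImageView *pView) {
    VkFramebufferCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    ci.renderPass = renderPass;
    ci.attachmentCount = 1; // a single color attachment
    ci.pAttachments = pView;
    ci.width = 1024;
    ci.height = 768;
    ci.layers = 1;
    return ci;
}
#endif
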
9018VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9019                        std::unordered_set<uint32_t> &processed_nodes) {
9020    // If we have already checked this node, it yielded no dependency path, so return false.
9021    if (processed_nodes.count(index))
9022        return VK_FALSE;
9023    processed_nodes.insert(index);
9024    const DAGNode &node = subpass_to_node[index];
9025    // Look for a direct dependency; if one exists return true, otherwise recurse on the previous nodes.
9026    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9027        for (auto elem : node.prev) {
9028            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9029                return VK_TRUE;
9030        }
9031    } else {
9032        return VK_TRUE;
9033    }
9034    return VK_FALSE;
9035}
9036
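// Illustrative sketch (not compiled into the layer): the prev-edge encoding that
// FindDependency walks, assuming DAGNode::prev is a vector of subpass indices as the
// uses in this file suggest. For a hypothetical chain 0 -> 1 -> 2, subpass 2 depends
// on subpass 0 transitively, so the search succeeds.
#if 0
static void exampleDependencyChain() {
    std::vector<DAGNode> subpass_to_node(3);
    subpass_to_node[1].prev.push_back(0); // subpass 1 depends on subpass 0
    subpass_to_node[2].prev.push_back(1); // subpass 2 depends on subpass 1
    std::unordered_set<uint32_t> processed_nodes;
    assert(VK_TRUE == FindDependency(2, 0, subpass_to_node, processed_nodes));
}
#endif
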
9037VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9038                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
9039    VkBool32 result = VK_TRUE;
9040    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9041    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9042        if (subpass == dependent_subpasses[k])
9043            continue;
9044        const DAGNode &node = subpass_to_node[subpass];
9045        // Check for a specified dependency between the two nodes. If one exists we are done.
9046        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9047        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9048        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9049            // If no explicit dependency exists, an implicit one still might. If so, warn; if not, report an error.
9050            std::unordered_set<uint32_t> processed_nodes;
9051            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9052                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
9053                // TODO: Verify against Valid Use section of spec
9054                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9055                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9056                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
9057                                     subpass, dependent_subpasses[k]);
9058            } else {
9059                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9060                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9061                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9062                                     dependent_subpasses[k]);
9063                result = VK_FALSE;
9064            }
9065        }
9066    }
9067    return result;
9068}
9069
9070VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9071                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
9072    const DAGNode &node = subpass_to_node[index];
9073    // If this node writes to the attachment, return true, as subsequent nodes need to preserve it.
9074    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9075    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9076        if (attachment == subpass.pColorAttachments[j].attachment)
9077            return VK_TRUE;
9078    }
9079    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9080        if (attachment == subpass.pDepthStencilAttachment->attachment)
9081            return VK_TRUE;
9082    }
9083    VkBool32 result = VK_FALSE;
9084    // Loop through previous nodes and see if any of them write to the attachment.
9085    for (auto elem : node.prev) {
9086        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9087    }
9088    // If the attachment was written to by a previous node, then this node needs to preserve it.
9089    if (result && depth > 0) {
9090        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9091        VkBool32 has_preserved = VK_FALSE;
9092        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9093            if (subpass.pPreserveAttachments[j] == attachment) {
9094                has_preserved = VK_TRUE;
9095                break;
9096            }
9097        }
9098        if (has_preserved == VK_FALSE) {
9099            skip_call |=
9100                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9101                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9102                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9103        }
9104    }
9105    return result;
9106}
9107
9108template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9109    // Ranges are half-open intervals [offset, offset + size); they overlap iff each one begins before the other ends
9110    return ((offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1)));
9111}
9112
9113bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9114    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9115            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9116}
9117
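// Illustrative sketch (not compiled into the layer): the half-open interval test above
// applied to hypothetical mip ranges. Merely adjacent ranges do not overlap; identical
// and partially intersecting ones do.
#if 0
static void exampleRangeOverlap() {
    assert(isRangeOverlapping(2u, 2u, 3u, 3u));  // [2,4) vs [3,6) -> overlap
    assert(!isRangeOverlapping(0u, 2u, 2u, 2u)); // [0,2) vs [2,4) -> adjacent, no overlap
    assert(isRangeOverlapping(1u, 3u, 1u, 3u));  // identical ranges -> overlap
}
#endif
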
9118VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
9119                              const std::vector<DAGNode> &subpass_to_node) {
9120    VkBool32 skip_call = VK_FALSE;
9121    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9122    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
9123    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9124    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9125    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9126    // Find overlapping attachments
9127    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9128        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9129            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9130            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9131            if (viewi == viewj) {
9132                overlapping_attachments[i].push_back(j);
9133                overlapping_attachments[j].push_back(i);
9134                continue;
9135            }
9136            auto view_data_i = my_data->imageViewMap.find(viewi);
9137            auto view_data_j = my_data->imageViewMap.find(viewj);
9138            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
9139                continue;
9140            }
9141            if (view_data_i->second.image == view_data_j->second.image &&
9142                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
9143                overlapping_attachments[i].push_back(j);
9144                overlapping_attachments[j].push_back(i);
9145                continue;
9146            }
9147            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
9148            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
9149            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
9150                continue;
9151            }
9152            if (image_data_i->second.mem == image_data_j->second.mem &&
9153                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
9154                                   image_data_j->second.memSize)) {
9155                overlapping_attachments[i].push_back(j);
9156                overlapping_attachments[j].push_back(i);
9157            }
9158        }
9159    }
9160    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9161        uint32_t attachment = i;
9162        for (auto other_attachment : overlapping_attachments[i]) {
9163            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9164                skip_call |=
9165                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9166                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9167                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9168                            attachment, other_attachment);
9169            }
9170            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9171                skip_call |=
9172                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9173                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9174                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9175                            other_attachment, attachment);
9176            }
9177        }
9178    }
9179    // For each attachment, find the subpasses that use it.
9180    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9181        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9182        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9183            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9184            input_attachment_to_subpass[attachment].push_back(i);
9185            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9186                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9187            }
9188        }
9189        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9190            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9191            output_attachment_to_subpass[attachment].push_back(i);
9192            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9193                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9194            }
9195        }
9196        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9197            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9198            output_attachment_to_subpass[attachment].push_back(i);
9199            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9200                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9201            }
9202        }
9203    }
9204    // Wherever a dependency is needed, make sure one exists
9205    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9206        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9207        // If the attachment is an input, then all subpasses that write to it must have a dependency relationship
9208        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9209            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
9210            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9211        }
9212        // If the attachment is an output, then all subpasses that use the attachment must have a dependency relationship
9213        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9214            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
9215            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9216            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9217        }
9218        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9219            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9220            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9221            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9222        }
9223    }
9224    // Walk the implicit dependencies: if a subpass reads an attachment, make sure that attachment is preserved by every
9225    // subpass between the one that last wrote it and the one that reads it.
9226    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9227        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9228        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9229            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9230        }
9231    }
9232    return skip_call;
9233}
9234
9235VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9236    VkBool32 skip = VK_FALSE;
9237
9238    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9239        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9240        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9241            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
9242                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9243                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9244                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9245                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9246                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9247                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9248                } else {
9249                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9250                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9251                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9252                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
9253                }
9254            }
9255        }
9256        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9257            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
9258                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9259                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9260                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9261                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9262                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9263                } else {
9264                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9265                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9266                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9267                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
9268                }
9269            }
9270        }
9271        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9272            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
9273                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
9274                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9275                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9276                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9277                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
9278                } else {
9279                    skip |=
9280                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9281                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9282                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
9283                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9284                }
9285            }
9286        }
9287    }
9288    return skip;
9289}
9290
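// Illustrative sketch (not compiled into the layer): attachment references using the
// layouts ValidateLayouts treats as optimal; GENERAL is accepted for any of them but
// draws a performance warning instead. The attachment indices are hypothetical.
#if 0
static const VkAttachmentReference exampleInputRef = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
static const VkAttachmentReference exampleColorRef = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
static const VkAttachmentReference exampleDepthRef = {2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
#endif
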
9291VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9292                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9293    VkBool32 skip_call = VK_FALSE;
9294    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9295        DAGNode &subpass_node = subpass_to_node[i];
9296        subpass_node.pass = i;
9297    }
9298    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9299        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9300        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9301            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9302            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9303                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9304                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9305        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9306            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9307                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9308        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9309            has_self_dependency[dependency.srcSubpass] = true;
9310        }
9311        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9312            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9313        }
9314        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9315            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9316        }
9317    }
9318    return skip_call;
9319}
9320
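// Illustrative sketch (not compiled into the layer): a forward dependency that
// CreatePassDAG records as an edge from subpass 0 to subpass 1. The stage and access
// masks are hypothetical but typical of a color-write -> input-attachment-read hazard.
#if 0
static const VkSubpassDependency exampleForwardDependency = {
    0,                                             // srcSubpass: the earlier pass
    1,                                             // dstSubpass: srcSubpass <= dstSubpass, so no DAG error above
    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // srcStageMask
    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,         // dstStageMask
    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,          // srcAccessMask
    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,           // dstAccessMask
    VK_DEPENDENCY_BY_REGION_BIT,                   // dependencyFlags
};
#endif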
9321
9322VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9323                                                                    const VkAllocationCallbacks *pAllocator,
9324                                                                    VkShaderModule *pShaderModule) {
9325    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9326    VkBool32 skip_call = VK_FALSE;
9327    if (!shader_is_spirv(pCreateInfo)) {
9328        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9329                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
9330    }
9331
9332    if (VK_FALSE != skip_call)
9333        return VK_ERROR_VALIDATION_FAILED_EXT;
9334
9335    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9336
9337    if (res == VK_SUCCESS) {
9338        loader_platform_thread_lock_mutex(&globalLock);
9339        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9340        loader_platform_thread_unlock_mutex(&globalLock);
9341    }
9342    return res;
9343}
9344
9345VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9346                                                                  const VkAllocationCallbacks *pAllocator,
9347                                                                  VkRenderPass *pRenderPass) {
9348    VkBool32 skip_call = VK_FALSE;
9349    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9350    loader_platform_thread_lock_mutex(&globalLock);
9351    // Create DAG
9352    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9353    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9354    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9355    // Validate
9356    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9357    if (VK_FALSE != skip_call) {
9358        loader_platform_thread_unlock_mutex(&globalLock);
9359        return VK_ERROR_VALIDATION_FAILED_EXT;
9360    }
9361    loader_platform_thread_unlock_mutex(&globalLock);
9362    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9363    if (VK_SUCCESS == result) {
9364        loader_platform_thread_lock_mutex(&globalLock);
9365        // TODOSC : Merge in tracking of renderpass from shader_checker
9366        // Shadow create info and store in map
9367        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9368        if (pCreateInfo->pAttachments) {
9369            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9370            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9371                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9372        }
9373        if (pCreateInfo->pSubpasses) {
9374            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9375            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9376
9377            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9378                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9379                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9380                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9381                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9382                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9383
9384                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9385                subpass->pInputAttachments = attachments;
9386                attachments += subpass->inputAttachmentCount;
9387
9388                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9389                subpass->pColorAttachments = attachments;
9390                attachments += subpass->colorAttachmentCount;
9391
9392                if (subpass->pResolveAttachments) {
9393                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9394                    subpass->pResolveAttachments = attachments;
9395                    attachments += subpass->colorAttachmentCount;
9396                }
9397
9398                if (subpass->pDepthStencilAttachment) {
9399                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9400                    subpass->pDepthStencilAttachment = attachments;
9401                    attachments += 1;
9402                }
9403
                // Preserve attachments are bare uint32_t indices rather than full VkAttachmentReferences,
                //  so copy the indices one at a time into the tail of the shadow block
                uint32_t *pPreserved = reinterpret_cast<uint32_t *>(attachments);
                for (uint32_t k = 0; k < subpass->preserveAttachmentCount; k++) {
                    pPreserved[k] = subpass->pPreserveAttachments[k];
                }
                subpass->pPreserveAttachments = pPreserved;
9406            }
9407        }
9408        if (pCreateInfo->pDependencies) {
9409            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9410            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9411                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9412        }
9413        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9414        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9415        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9416#if MTMERGESOURCE
9417        // MTMTODO : Merge with code from above to eliminate duplication
9418        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9419            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9420            MT_PASS_ATTACHMENT_INFO pass_info;
9421            pass_info.load_op = desc.loadOp;
9422            pass_info.store_op = desc.storeOp;
9423            pass_info.attachment = i;
9424            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9425        }
9426        // TODO: Maybe fill list and then copy instead of locking
9427        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9428        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9429            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9430        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9431            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9432            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9433                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9434                if (attachment_first_read.count(attachment))
9435                    continue;
9436                attachment_first_read.insert(std::make_pair(attachment, true));
9437                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9438            }
9439            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9440                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9441                if (attachment_first_read.count(attachment))
9442                    continue;
9443                attachment_first_read.insert(std::make_pair(attachment, false));
9444                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9445            }
9446            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9447                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9448                if (attachment_first_read.count(attachment))
9449                    continue;
9450                attachment_first_read.insert(std::make_pair(attachment, false));
9451                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9452            }
9453        }
9454#endif
9455        loader_platform_thread_unlock_mutex(&globalLock);
9456    }
9457    return result;
9458}
9459// Free the renderpass shadow
9460static void deleteRenderPasses(layer_data *my_data) {
9461    if (my_data->renderPassMap.empty())
9462        return;
9463    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9464        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9465        delete[] pRenderPassInfo->pAttachments;
9466        if (pRenderPassInfo->pSubpasses) {
9467            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9468                // Attachments are all allocated in one block, so we just need to
9469                //  find the first non-null pointer and delete that
9470                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9471                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9472                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9473                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9474                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9475                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9476                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9477                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9478                }
9479            }
9480            delete[] pRenderPassInfo->pSubpasses;
9481        }
9482        delete[] pRenderPassInfo->pDependencies;
9483        delete pRenderPassInfo;
9484        delete (*ii).second;
9485    }
9486    my_data->renderPassMap.clear();
9487}
9488
9489VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9490    VkBool32 skip_call = VK_FALSE;
9491    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9492    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9493    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9494    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9495    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9496        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9497                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9498                                                                 "with a different number of attachments.");
9499    }
9500    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9501        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9502        auto image_data = dev_data->imageViewMap.find(image_view);
9503        assert(image_data != dev_data->imageViewMap.end());
9504        const VkImage &image = image_data->second.image;
9505        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9506        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9507                                             pRenderPassInfo->pAttachments[i].initialLayout};
9508        // TODO: Do not iterate over every possibility - consolidate where possible
9509        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9510            uint32_t level = subRange.baseMipLevel + j;
9511            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9512                uint32_t layer = subRange.baseArrayLayer + k;
9513                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9514                IMAGE_CMD_BUF_LAYOUT_NODE node;
9515                if (!FindLayout(pCB, image, sub, node)) {
9516                    SetLayout(pCB, image, sub, newNode);
9517                    continue;
9518                }
9519                if (newNode.layout != node.layout) {
9520                    skip_call |=
9521                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9522                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9523                                                                    "where the "
9524                                                                    "initial layout is %s and the layout of the attachment at the "
9525                                                                    "start of the render pass is %s. The layouts must match.",
9526                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9527                }
9528            }
9529        }
9530    }
9531    return skip_call;
9532}
9533
9534void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
9535    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9536    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9537    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9538    if (render_pass_data == dev_data->renderPassMap.end()) {
9539        return;
9540    }
9541    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9542    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9543    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9544        return;
9545    }
9546    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9547    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9548    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9549        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9550        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9551    }
9552    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9553        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9554        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9555    }
9556    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9557        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9558        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9559    }
9560}
9561
9562VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9563    VkBool32 skip_call = VK_FALSE;
9564    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9565        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9566                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9567                             cmd_name.c_str());
9568    }
9569    return skip_call;
9570}
9571
9572void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9573    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9574    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9575    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9576    if (render_pass_data == dev_data->renderPassMap.end()) {
9577        return;
9578    }
9579    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9580    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9581    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9582        return;
9583    }
9584    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9585    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9586        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9587        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9588    }
9589}
9590
9591bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9592    bool skip_call = false;
9593    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9594    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9595        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9596        pRenderPassBegin->renderArea.offset.y < 0 ||
9597        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9598        skip_call |= static_cast<bool>(log_msg(
9599            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9600            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9601            "Cannot execute a render pass with renderArea not within the bounds of the "
9602            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9603            "height %d.",
9604            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9605            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9606    }
9607    return skip_call;
9608}
9609
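// Illustrative sketch (not compiled into the layer): a renderArea that passes the bounds
// check above for a hypothetical 1024x768 framebuffer: non-negative offset, and
// offset + extent no larger than the framebuffer dimensions.
#if 0
static const VkRect2D exampleRenderArea = {{0, 0}, {1024, 768}};
#endif
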
9610VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9611vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9612    VkBool32 skipCall = VK_FALSE;
9613    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9614    loader_platform_thread_lock_mutex(&globalLock);
9615    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9616    if (pCB) {
9617        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
9618#if MTMERGESOURCE
9619            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9620            if (pass_data != dev_data->renderPassMap.end()) {
9621                RENDER_PASS_NODE* pRPNode = pass_data->second;
9622                pRPNode->fb = pRenderPassBegin->framebuffer;
9623                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9624                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9625                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9626                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9627                        if (cb_data != dev_data->commandBufferMap.end()) {
9628                            std::function<VkBool32()> function = [=]() {
9629                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9630                                return VK_FALSE;
9631                            };
9632                            cb_data->second->validate_functions.push_back(function);
9633                        }
9634                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9635                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9636                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9637                            skipCall |=
9638                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9639                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9640                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9641                                        pRPNode->attachments[i].attachment, attachment_layout);
9642                        }
9643                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9644                        if (cb_data != dev_data->commandBufferMap.end()) {
9645                            std::function<VkBool32()> function = [=]() {
9646                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9647                                return VK_FALSE;
9648                            };
9649                            cb_data->second->validate_functions.push_back(function);
9650                        }
9651                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9652                        if (cb_data != dev_data->commandBufferMap.end()) {
9653                            std::function<VkBool32()> function = [=]() {
9654                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9655                            };
9656                            cb_data->second->validate_functions.push_back(function);
9657                        }
9658                    }
9659                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9660                        if (cb_data != dev_data->commandBufferMap.end()) {
9661                            std::function<VkBool32()> function = [=]() {
9662                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9663                            };
9664                            cb_data->second->validate_functions.push_back(function);
9665                        }
9666                    }
9667                }
9668            }
9669#endif
9670            skipCall |= static_cast<VkBool32>(VerifyRenderAreaBounds(dev_data, pRenderPassBegin));
9671            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9672            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9673            if (render_pass_data != dev_data->renderPassMap.end()) {
9674                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9675            }
9676            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9677            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9678            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9679            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9680            // This is a shallow copy as that is all that is needed for now
9681            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9682            pCB->activeSubpass = 0;
9683            pCB->activeSubpassContents = contents;
9684            pCB->framebuffer = pRenderPassBegin->framebuffer;
9685            // Connect this framebuffer to this cmdBuffer
9686            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9687        } else {
9688            skipCall |=
9689                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9690                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9691        }
9692    }
9693    loader_platform_thread_unlock_mutex(&globalLock);
9694    if (VK_FALSE == skipCall) {
9695        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9696        loader_platform_thread_lock_mutex(&globalLock);
9697        // This is a shallow copy as that is all that is needed for now
9698        dev_data->renderPassBeginInfo = *pRenderPassBegin;
9699        dev_data->currentSubpass = 0;
9700        loader_platform_thread_unlock_mutex(&globalLock);
9701    }
9702}
9703
9704VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9705    VkBool32 skipCall = VK_FALSE;
9706    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9707    loader_platform_thread_lock_mutex(&globalLock);
9708    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9709    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
9710    if (pCB) {
9711        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9712        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9713        pCB->activeSubpass++;
9714        pCB->activeSubpassContents = contents;
9715        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9716        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9717            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9718                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9719        }
9720        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9721    }
9722    loader_platform_thread_unlock_mutex(&globalLock);
9723    if (VK_FALSE == skipCall)
9724        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9725}
9726
9727VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9728    VkBool32 skipCall = VK_FALSE;
9729    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9730    loader_platform_thread_lock_mutex(&globalLock);
9731#if MTMERGESOURCE
9732    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9733    if (cb_data != dev_data->commandBufferMap.end()) {
9734        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
9735        if (pass_data != dev_data->renderPassMap.end()) {
9736            RENDER_PASS_NODE* pRPNode = pass_data->second;
9737            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9738                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9739                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9740                    if (cb_data != dev_data->commandBufferMap.end()) {
9741                        std::function<VkBool32()> function = [=]() {
9742                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9743                            return VK_FALSE;
9744                        };
9745                        cb_data->second->validate_functions.push_back(function);
9746                    }
9747                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9748                    if (cb_data != dev_data->commandBufferMap.end()) {
9749                        std::function<VkBool32()> function = [=]() {
9750                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9751                            return VK_FALSE;
9752                        };
9753                        cb_data->second->validate_functions.push_back(function);
9754                    }
9755                }
9756            }
9757        }
9758    }
9759#endif
9760    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9761    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
9762    if (pCB) {
9763        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9764        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9765        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9766        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9767        pCB->activeRenderPass = 0;
9768        pCB->activeSubpass = 0;
9769    }
9770    loader_platform_thread_unlock_mutex(&globalLock);
9771    if (VK_FALSE == skipCall)
9772        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9773}
9774
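// The STORE_OP handling in vkCmdEndRenderPass above uses the layer's deferred-
// validation pattern: each recorded command pushes a closure, and the closures
// run later, when memory validity can actually be judged. A minimal sketch of
// that pattern, assuming (as elsewhere in this layer) that validate_functions
// is drained when the command buffer is submitted:
#if 0 // Illustrative only; not compiled
struct DeferredChecks {
    std::vector<std::function<VkBool32()>> validate_functions;
    VkBool32 run_all() { // invoked once the command buffer is submitted
        VkBool32 skip = VK_FALSE;
        for (auto &fn : validate_functions)
            skip |= fn();
        return skip;
    }
};
#endif
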
9775bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9776                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
9777    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9778                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9779                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9780                   " that is not compatible with the current render pass %" PRIx64 "."
9781                   " Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9782                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9783                   msg);
9784}
9785
9786bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9787                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9788                                     uint32_t secondaryAttach, bool is_multi) {
9789    bool skip_call = false;
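    // Precondition (relied on by the call sites below): primaryPass and secondaryPass
    // both exist in renderPassMap, so the find() results are dereferenced without an
    // end() check here.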
9790    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9791    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9792    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9793        primaryAttach = VK_ATTACHMENT_UNUSED;
9794    }
9795    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9796        secondaryAttach = VK_ATTACHMENT_UNUSED;
9797    }
9798    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9799        return skip_call;
9800    }
9801    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9802        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9803                                                 secondaryAttach, "The first is unused while the second is not.");
9804        return skip_call;
9805    }
9806    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9807        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9808                                                 secondaryAttach, "The second is unused while the first is not.");
9809        return skip_call;
9810    }
9811    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9812        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9813        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9814                                                 secondaryAttach, "They have different formats.");
9815    }
9816    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9817        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9818        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9819                                                 secondaryAttach, "They have different samples.");
9820    }
9821    if (is_multi &&
9822        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9823            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9824        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9825                                                 secondaryAttach, "They have different flags.");
9826    }
9827    return skip_call;
9828}
9829
9830bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9831                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
9832    bool skip_call = false;
9833    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9834    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9835    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9836    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9837    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9838    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9839        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9840        if (i < primary_desc.inputAttachmentCount) {
9841            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9842        }
9843        if (i < secondary_desc.inputAttachmentCount) {
9844            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9845        }
9846        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9847                                                     secondaryPass, secondary_input_attach, is_multi);
9848    }
9849    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9850    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9851        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9852        if (i < primary_desc.colorAttachmentCount) {
9853            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9854        }
9855        if (i < secondary_desc.colorAttachmentCount) {
9856            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9857        }
9858        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9859                                                     secondaryPass, secondary_color_attach, is_multi);
9860        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9861        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9862            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9863        }
9864        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9865            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9866        }
9867        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9868                                                     secondaryPass, secondary_resolve_attach, is_multi);
9869    }
9870    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9871    if (primary_desc.pDepthStencilAttachment) {
9872        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9873    }
9874    if (secondary_desc.pDepthStencilAttachment) {
9875        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9876    }
9877    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9878                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9879    return skip_call;
9880}
9881
9882bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9883                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9884    bool skip_call = false;
9885    // Early exit if renderPass objects are identical (and therefore compatible)
9886    if (primaryPass == secondaryPass)
9887        return skip_call;
9888    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9889    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9890    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9891        skip_call |=
9892            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9893                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9894                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9895                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9896        return skip_call;
9897    }
9898    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9899        skip_call |=
9900            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9901                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9902                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9903                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9904        return skip_call;
9905    }
9906    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9907        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9908                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9909                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9910                             " that is not compatible with the current render pass %" PRIx64 "."
9911                             " They have a different number of subpasses.",
9912                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9913        return skip_call;
9914    }
9915    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9916    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9917        skip_call |=
9918            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9919    }
9920    return skip_call;
9921}
9922
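// Render pass compatibility for vkCmdExecuteCommands hinges on per-attachment
// format and sample count (plus attachment flags for multi-subpass passes);
// load/store ops and layouts do not participate, per the checks above. A
// minimal sketch with hypothetical values:
#if 0 // Illustrative only; not compiled
VkAttachmentDescription a = {};
a.format = VK_FORMAT_B8G8R8A8_UNORM;      // must match between the two passes
a.samples = VK_SAMPLE_COUNT_1_BIT;        // must match between the two passes
a.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;   // may differ without breaking compatibility
a.storeOp = VK_ATTACHMENT_STORE_OP_STORE; // may differ without breaking compatibility
#endif
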
9923bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9924                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9925    bool skip_call = false;
9926    if (!pSubCB->beginInfo.pInheritanceInfo) {
9927        return skip_call;
9928    }
9929    VkFramebuffer primary_fb = pCB->framebuffer;
9930    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9931    if (secondary_fb != VK_NULL_HANDLE) {
9932        if (primary_fb != secondary_fb) {
9933            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9934                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9935                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
9936                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
9937                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9938        }
9939        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
9940        if (fb_data == dev_data->frameBufferMap.end()) {
9941            skip_call |=
9942                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9943                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9944                                                                          "which has invalid framebuffer %" PRIx64 ".",
9945                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9946            return skip_call;
9947        }
9948        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
9949                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9950    }
9951    return skip_call;
9952}
9953
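// How an application satisfies the framebuffer/render-pass checks above; a
// sketch where render_pass, framebuffer, and secondary_cb are hypothetical
// handles owned by the app:
#if 0 // Illustrative only; not compiled
VkCommandBufferInheritanceInfo inherit = {};
inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
inherit.renderPass = render_pass;  // must be compatible with the primary's active pass
inherit.subpass = 0;
inherit.framebuffer = framebuffer; // optional; if set, must match the primary's FB
VkCommandBufferBeginInfo begin = {};
begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
begin.pInheritanceInfo = &inherit;
vkBeginCommandBuffer(secondary_cb, &begin);
#endif
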
9954bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9955    bool skipCall = false;
9956    unordered_set<int> activeTypes;
9957    for (auto queryObject : pCB->activeQueries) {
9958        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9959        if (queryPoolData != dev_data->queryPoolMap.end()) {
9960            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9961                pSubCB->beginInfo.pInheritanceInfo) {
9962                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9963                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9964                    skipCall |= log_msg(
9965                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9966                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9967                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9968                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried, so the "
9969                        "secondary command buffer may only inherit statistics bits enabled on the query pool.",
9970                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9971                }
9972            }
9973            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9974        }
9975    }
9976    for (auto queryObject : pSubCB->startedQueries) {
9977        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9978        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9979            skipCall |=
9980                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9981                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9982                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9983                        "which has invalid active query pool %" PRIx64 " of type %d but a query of that type has been started on "
9984                        "secondary Cmd Buffer %p.",
9985                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9986                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9987        }
9988    }
9989    return skipCall;
9990}
9991
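// The pipeline-statistics test above is a subset check: every statistic the
// secondary CB inherits must be enabled on the active query pool. In isolation,
// with hypothetical values:
#if 0 // Illustrative only; not compiled
VkQueryPipelineStatisticFlags pool_stats =
    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
VkQueryPipelineStatisticFlags cb_stats = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
bool ok = ((cb_stats & pool_stats) == cb_stats); // true: cb_stats is a subset of pool_stats
#endif
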
9992VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9993vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9994    VkBool32 skipCall = VK_FALSE;
9995    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9996    loader_platform_thread_lock_mutex(&globalLock);
9997    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9998    if (pCB) {
9999        GLOBAL_CB_NODE *pSubCB = NULL;
10000        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10001            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10002            if (!pSubCB) {
10003                skipCall |=
10004                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10005                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10006                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
10007                            (void *)pCommandBuffers[i], i);
10008            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10009                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10010                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10011                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
10012                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
10013                                    (void *)pCommandBuffers[i], i);
10014            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10015                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10016                    skipCall |= log_msg(
10017                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10018                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10019                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
10020                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10021                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
10022                } else {
10023                    // Make sure render pass is compatible with parent command buffer pass if has continue
10024                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
10025                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
10026                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10027                }
10028                string errorString = "";
10029                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
10030                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
10031                    skipCall |= log_msg(
10032                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10033                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10034                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
10035                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
10036                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10037                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
10038                }
10039                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
10040                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
10041                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
10042                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
10043                        skipCall |= log_msg(
10044                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10045                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10046                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
10047                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
10048                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
10049                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
10050                    }
10051                }
10052            }
10053            // TODO(mlentine): Move more logic into this method
10054            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10055            skipCall |= validateCommandBufferState(dev_data, pSubCB);
10056            // A secondary cmdBuffer is considered pending execution from the
10057            // moment it is recorded into an in-flight primary
10058            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10059                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10060                    skipCall |= log_msg(
10061                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10062                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10063                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10064                        "set!",
10065                        (uint64_t)(pCB->commandBuffer));
10066                }
10067                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10068                    // Warn that a secondary cmd buffer w/o SIMULTANEOUS_USE_BIT forces the primary to be treated as non-simultaneous
10069                    skipCall |= log_msg(
10070                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10071                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10072                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
10073                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10074                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10075                                          "set, even though it does.",
10076                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10077                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10078                }
10079            }
10080            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
10081                skipCall |=
10082                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10083                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10084                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10085                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
10086                            "flight and inherited queries not "
10087                            "supported on this device.",
10088                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10089            }
10090            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10091            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10092            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10093        }
10094        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
10095        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10096    }
10097    loader_platform_thread_unlock_mutex(&globalLock);
10098    if (VK_FALSE == skipCall)
10099        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10100}
10101
10102VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
10103    VkBool32 skip_call = VK_FALSE;
10104    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10105    auto mem_data = dev_data->memObjMap.find(mem);
10106    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
10107        std::vector<VkImageLayout> layouts;
10108        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
10109            for (auto layout : layouts) {
10110                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10111                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10112                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10113                                                                                         "GENERAL or PREINITIALIZED are supported.",
10114                                         string_VkImageLayout(layout));
10115                }
10116            }
10117        }
10118    }
10119    return skip_call;
10120}
10121
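// A mapping pattern that passes the layout check above; a sketch assuming a
// linear-tiled image bound to host-visible memory (image_mem is a hypothetical
// handle):
#if 0 // Illustrative only; not compiled
// The image must have been created with initialLayout
// VK_IMAGE_LAYOUT_PREINITIALIZED (or transitioned to GENERAL) before mapping:
void *ptr = nullptr;
vkMapMemory(device, image_mem, 0, VK_WHOLE_SIZE, 0, &ptr);
#endif
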
10122VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10123vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10124    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10125
10126    VkBool32 skip_call = VK_FALSE;
10127    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10128    loader_platform_thread_lock_mutex(&globalLock);
10129#if MTMERGESOURCE
10130    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
10131    if (pMemObj) {
10132        pMemObj->valid = true;
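        // memProps: the device's VkPhysicalDeviceMemoryProperties, assumed to be
        // cached at device-creation time by the merged mem_tracker code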
10133        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10134            skip_call =
10135                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10136                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10137                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
10138        }
10139    }
10140    skip_call |= validateMemRange(dev_data, mem, offset, size);
10141    storeMemRanges(dev_data, mem, offset, size);
10142#endif
10143    skip_call |= ValidateMapImageLayouts(device, mem);
10144    loader_platform_thread_unlock_mutex(&globalLock);
10145
10146    if (VK_FALSE == skip_call) {
10147        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
10148#if MTMERGESOURCE
10149        loader_platform_thread_lock_mutex(&globalLock);
10150        initializeAndTrackMemory(dev_data, mem, size, ppData);
10151        loader_platform_thread_unlock_mutex(&globalLock);
10152#endif
10153    }
10154    return result;
10155}
10156
10157#if MTMERGESOURCE
10158VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
10159    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10160    VkBool32 skipCall = VK_FALSE;
10161
10162    loader_platform_thread_lock_mutex(&globalLock);
10163    skipCall |= deleteMemRanges(my_data, mem);
10164    loader_platform_thread_unlock_mutex(&globalLock);
10165    if (VK_FALSE == skipCall) {
10166        my_data->device_dispatch_table->UnmapMemory(device, mem);
10167    }
10168}
10169
10170VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10171                                const VkMappedMemoryRange *pMemRanges) {
10172    VkBool32 skipCall = VK_FALSE;
10173    for (uint32_t i = 0; i < memRangeCount; ++i) {
10174        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10175        if (mem_element != my_data->memObjMap.end()) {
10176            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
10177                skipCall |= log_msg(
10178                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10179                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10180                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10181                    "(" PRINTF_SIZE_T_SPECIFIER ").",
10182                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
10183            }
10184            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
10185                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
10186                 (pMemRanges[i].offset + pMemRanges[i].size))) {
10187                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10188                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10189                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10190                                                                 ") exceeds the Memory Object's upper-bound "
10191                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
10192                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10193                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
10194            }
10195        }
10196    }
10197    return skipCall;
10198}
10199
10200VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10201                                                  const VkMappedMemoryRange *pMemRanges) {
10202    VkBool32 skipCall = VK_FALSE;
10203    for (uint32_t i = 0; i < memRangeCount; ++i) {
10204        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10205        if (mem_element != my_data->memObjMap.end()) {
10206            if (mem_element->second.pData) {
10207                VkDeviceSize size = mem_element->second.memRange.size;
10208                VkDeviceSize half_size = (size / 2);
10209                char *data = static_cast<char *>(mem_element->second.pData);
10210                for (VkDeviceSize j = 0; j < half_size; ++j) {
10211                    if (data[j] != NoncoherentMemoryFillValue) {
10212                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10213                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10214                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10215                                            (uint64_t)pMemRanges[i].memory);
10216                    }
10217                }
10218                for (auto j = size + half_size; j < 2 * size; ++j) {
10219                    if (data[j] != NoncoherentMemoryFillValue) {
10220                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10221                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10222                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10223                                            (uint64_t)pMemRanges[i].memory);
10224                    }
10225                }
10226                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
10227            }
10228        }
10229    }
10230    return skipCall;
10231}
10232
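// Shadow-buffer layout assumed by the guard-band scans above (set up by
// initializeAndTrackMemory(), per the offsets used in the loops and memcpy):
//   [0, size/2)              leading guard band, NoncoherentMemoryFillValue
//   [size/2, size/2 + size)  bytes handed back to the application
//   [size/2 + size, 2*size)  trailing guard band, NoncoherentMemoryFillValue
// An application write outside the middle region disturbs a guard byte and is
// reported as a memory overflow when the range is flushed.
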
10233VK_LAYER_EXPORT VkResult VKAPI_CALL
10234vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10235    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10236    VkBool32 skipCall = VK_FALSE;
10237    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10238
10239    loader_platform_thread_lock_mutex(&globalLock);
10240    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10241    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10242    loader_platform_thread_unlock_mutex(&globalLock);
10243    if (VK_FALSE == skipCall) {
10244        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10245    }
10246    return result;
10247}
10248
10249VK_LAYER_EXPORT VkResult VKAPI_CALL
10250vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10251    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10252    VkBool32 skipCall = VK_FALSE;
10253    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10254
10255    loader_platform_thread_lock_mutex(&globalLock);
10256    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10257    loader_platform_thread_unlock_mutex(&globalLock);
10258    if (VK_FALSE == skipCall) {
10259        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10260    }
10261    return result;
10262}
10263#endif
10264
10265VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10266    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10267    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10268    VkBool32 skipCall = VK_FALSE;
10269#if MTMERGESOURCE
10270    loader_platform_thread_lock_mutex(&globalLock);
10271    // Track objects tied to memory
10272    uint64_t image_handle = (uint64_t)(image);
10273    skipCall =
10274        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10275    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
10276    {
10277        VkMemoryRequirements memRequirements;
10278        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements); // call down the chain; globalLock is held
10279        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
10280                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
10281                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
10282    }
10283    print_mem_list(dev_data, device);
10284    loader_platform_thread_unlock_mutex(&globalLock);
10285#endif
10286    if (VK_FALSE == skipCall) {
10287        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10288        VkMemoryRequirements memRequirements;
10289        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10290        loader_platform_thread_lock_mutex(&globalLock);
10291        dev_data->memObjMap[mem].image = image;
10292        dev_data->imageMap[image].mem = mem;
10293        dev_data->imageMap[image].memOffset = memoryOffset;
10294        dev_data->imageMap[image].memSize = memRequirements.size;
10295        loader_platform_thread_unlock_mutex(&globalLock);
10296    }
10297    return result;
10298}
10299
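// The canonical bind sequence that the code above validates; a sketch with a
// hypothetical allocation `memory`:
#if 0 // Illustrative only; not compiled
VkMemoryRequirements reqs;
vkGetImageMemoryRequirements(device, image, &reqs);
// memoryOffset must be a multiple of reqs.alignment, and the allocation's
// memoryTypeIndex must correspond to a bit set in reqs.memoryTypeBits.
vkBindImageMemory(device, image, memory, /*memoryOffset*/ 0);
#endif
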
10300VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
10301    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10302    loader_platform_thread_lock_mutex(&globalLock);
10303    dev_data->eventMap[event].needsSignaled = false;
10304    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10305    loader_platform_thread_unlock_mutex(&globalLock);
10306    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
10307    return result;
10308}
10309
10310VKAPI_ATTR VkResult VKAPI_CALL
10311vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10312    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10313    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10314    VkBool32 skip_call = VK_FALSE;
10315#if MTMERGESOURCE
10316    //MTMTODO : Merge this code with the checks below
10317    loader_platform_thread_lock_mutex(&globalLock);
10318
10319    for (uint32_t i = 0; i < bindInfoCount; i++) {
10320        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
10321        // Track objects tied to memory
10322        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
10323            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
10324                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
10325                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10326                                           "vkQueueBindSparse"))
10327                    skip_call = VK_TRUE;
10328            }
10329        }
10330        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
10331            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
10332                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
10333                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10334                                           "vkQueueBindSparse"))
10335                    skip_call = VK_TRUE;
10336            }
10337        }
10338        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
10339            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
10340                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
10341                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10342                                           "vkQueueBindSparse"))
10343                    skip_call = VK_TRUE;
10344            }
10345        }
10346        // Validate semaphore state
10347        for (uint32_t w = 0; w < bindInfo->waitSemaphoreCount; w++) {
10348            VkSemaphore sem = bindInfo->pWaitSemaphores[w];
10349
10350            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10351                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
10352                    skip_call =
10353                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10354                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10355                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
10356                }
10357                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
10358            }
10359        }
10360        for (uint32_t s = 0; s < bindInfo->signalSemaphoreCount; s++) {
10361            VkSemaphore sem = bindInfo->pSignalSemaphores[s];
10362
10363            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10364                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10365                    skip_call =
10366                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10367                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10368                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
10369                }
10370                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10371            }
10372        }
10373    }
10374
10375    print_mem_list(dev_data, queue);
10376    loader_platform_thread_unlock_mutex(&globalLock);
10377#endif
10378    loader_platform_thread_lock_mutex(&globalLock);
10379    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10380        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10381        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10382            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
10383                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
10384            } else {
10385                skip_call |=
10386                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10387                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10388                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10389                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
10390            }
10391        }
10392        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10393            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
10394        }
10395    }
10396    loader_platform_thread_unlock_mutex(&globalLock);
10397
10398    if (VK_FALSE == skip_call)
10399        result = dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10400#if MTMERGESOURCE
10401    // Update semaphore state
10402    loader_platform_thread_lock_mutex(&globalLock);
10403    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
10404        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
10405        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
10406            VkSemaphore sem = bindInfo->pWaitSemaphores[i];
10407
10408            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10409                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10410            }
10411        }
10412    }
10413    loader_platform_thread_unlock_mutex(&globalLock);
10414#endif
10415
10416    return result;
10417}
10418
10419VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10420                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10421    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10422    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10423    if (result == VK_SUCCESS) {
10424        loader_platform_thread_lock_mutex(&globalLock);
10425        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10426        sNode->signaled = 0;
10427        sNode->queue = VK_NULL_HANDLE;
10428        sNode->in_use.store(0);
10429        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10430        loader_platform_thread_unlock_mutex(&globalLock);
10431    }
10432    return result;
10433}
10434
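// MEMTRACK semaphore lifecycle established above and enforced by
// vkQueueBindSparse, vkAcquireNextImageKHR, and vkQueuePresentKHR:
//   UNSET --(signaled by submit/bind/acquire)--> SIGNALLED
//   SIGNALLED --(named in pWaitSemaphores)-----> WAIT
//   WAIT/SIGNALLED --(wait consumed)-----------> UNSET
// Signaling requires the UNSET state; waiting requires SIGNALLED.
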
10435VKAPI_ATTR VkResult VKAPI_CALL
10436vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10437    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10438    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10439    if (result == VK_SUCCESS) {
10440        loader_platform_thread_lock_mutex(&globalLock);
10441        dev_data->eventMap[*pEvent].needsSignaled = false;
10442        dev_data->eventMap[*pEvent].in_use.store(0);
10443        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10444        loader_platform_thread_unlock_mutex(&globalLock);
10445    }
10446    return result;
10447}
10448
10449VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10450                                                                    const VkAllocationCallbacks *pAllocator,
10451                                                                    VkSwapchainKHR *pSwapchain) {
10452    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10453    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10454
10455    if (VK_SUCCESS == result) {
10456        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10457        loader_platform_thread_lock_mutex(&globalLock);
10458        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10459        loader_platform_thread_unlock_mutex(&globalLock);
10460    }
10461
10462    return result;
10463}
10464
10465VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10466vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10467    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10468    bool skipCall = false;
10469
10470    loader_platform_thread_lock_mutex(&globalLock);
10471    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10472    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10473        if (swapchain_data->second->images.size() > 0) {
10474            for (auto swapchain_image : swapchain_data->second->images) {
10475                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10476                if (image_sub != dev_data->imageSubresourceMap.end()) {
10477                    for (auto imgsubpair : image_sub->second) {
10478                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10479                        if (image_item != dev_data->imageLayoutMap.end()) {
10480                            dev_data->imageLayoutMap.erase(image_item);
10481                        }
10482                    }
10483                    dev_data->imageSubresourceMap.erase(image_sub);
10484                }
10485#if MTMERGESOURCE
10486                skipCall = clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
10487                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10488                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
10489#endif
10490            }
10491        }
10492        delete swapchain_data->second;
10493        dev_data->device_extensions.swapchainMap.erase(swapchain);
10494    }
10495    loader_platform_thread_unlock_mutex(&globalLock);
10496    if (!skipCall)
10497        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10498}
10499
10500VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10501vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10502    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10503    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10504
10505    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10506        // This should never happen and is checked by param checker.
10507        if (!pCount)
10508            return result;
10509        loader_platform_thread_lock_mutex(&globalLock);
10510        const size_t count = *pCount;
10511        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10512        if (!swapchain_node->images.empty()) {
10513            // TODO : Not sure I like the memcmp here, but it works
10514            const bool mismatch = (swapchain_node->images.size() != count ||
10515                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10516            if (mismatch) {
10517                // TODO: Verify against Valid Usage section of extension
10518                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10519                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10520                        "vkGetSwapchainImagesKHR(swapchain %" PRIu64
10521                        ") returned mismatching data",
10522                        (uint64_t)(swapchain));
10523            }
10524        }
10525        for (uint32_t i = 0; i < *pCount; ++i) {
10526            IMAGE_LAYOUT_NODE image_layout_node;
10527            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10528            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10529            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
10530            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10531            swapchain_node->images.push_back(pSwapchainImages[i]);
10532            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10533            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10534            dev_data->imageLayoutMap[subpair] = image_layout_node;
10535            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10536        }
10537        if (!swapchain_node->images.empty()) {
10538            for (auto image : swapchain_node->images) {
10539                // Add image object binding, then insert the new Mem Object and then bind it to created image
10540#if MTMERGESOURCE
10541                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10542                                       &swapchain_node->createInfo);
10543#endif
10544            }
10545        }
10546        loader_platform_thread_unlock_mutex(&globalLock);
10547    }
10548    return result;
10549}
10550
10551VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10552    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10553    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10554    bool skip_call = false;
10555
10556    if (pPresentInfo) {
10557        loader_platform_thread_lock_mutex(&globalLock);
10558        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10559            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
10560                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
10561            } else {
10562                skip_call |=
10563                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10564                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10565                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10566                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
10567            }
10568        }
10569        VkDeviceMemory mem;
10570        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10571            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
10572            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
10573                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
10574                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
10575#if MTMERGESOURCE
10576                skip_call |=
10577                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10578                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10579#endif
10580                vector<VkImageLayout> layouts;
10581                if (FindLayouts(dev_data, image, layouts)) {
10582                    for (auto layout : layouts) {
10583                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10584                            skip_call |=
10585                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10586                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10587                                        "Images passed to present must be in layout "
10588                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, but image is in %s",
10589                                        string_VkImageLayout(layout));
10590                        }
10591                    }
10592                }
10593            }
10594        }
10595        loader_platform_thread_unlock_mutex(&globalLock);
10596    }
10597
10598    if (!skip_call)
10599        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10600#if MTMERGESOURCE
10601    loader_platform_thread_lock_mutex(&globalLock);
10602    for (uint32_t i = 0; pPresentInfo && (i < pPresentInfo->waitSemaphoreCount); i++) {
10603        VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
10604        if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10605            dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10606        }
10607    }
10608    loader_platform_thread_unlock_mutex(&globalLock);
10609#endif
10610    return result;
10611}
10612
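// Transitioning a swapchain image for present, which satisfies the
// PRESENT_SRC_KHR layout check above; a sketch with hypothetical handles
// cmd and swapchain_image:
#if 0 // Illustrative only; not compiled
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = swapchain_image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                     VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &barrier);
#endif
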
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add code to handle the "fence" parameter as well
    dev_data->semaphoreMap[semaphore].signaled = 1;
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}

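// Illustrative sketch (not part of this layer, not compiled): the acquire/present
// pairing that the two entry points above validate. The semaphores are assumed to
// be created by the application; the rendering work submitted in between waits on
// imageAcquired and signals renderDone.
#if 0
static VkResult example_present_one_frame(VkDevice device, VkQueue queue, VkSwapchainKHR swapchain,
                                          VkSemaphore imageAcquired, VkSemaphore renderDone) {
    uint32_t imageIndex = 0;
    VkResult result = vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, imageAcquired, VK_NULL_HANDLE, &imageIndex);
    if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR)
        return result;
    // ... record and submit rendering work that waits on imageAcquired and signals renderDone ...
    VkPresentInfoKHR presentInfo = {};
    presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    presentInfo.waitSemaphoreCount = 1;
    presentInfo.pWaitSemaphores = &renderDone;
    presentInfo.swapchainCount = 1;
    presentInfo.pSwapchains = &swapchain;
    presentInfo.pImageIndices = &imageIndex;
    return vkQueuePresentKHR(queue, &presentInfo);
}
#endif
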
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        loader_platform_thread_lock_mutex(&globalLock);
        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return res;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                                                           VkDebugReportCallbackEXT msgCallback,
                                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

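// Illustrative sketch (not part of this layer, not compiled): how an application
// registers a callback that is routed through the entry points above. The EXT
// functions are not exported by the loader, so they must be fetched through
// vkGetInstanceProcAddr.
#if 0
static VkBool32 VKAPI_CALL example_debug_callback(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType,
                                                  uint64_t object, size_t location, int32_t msgCode,
                                                  const char *pLayerPrefix, const char *pMsg, void *pUserData) {
    LOGCONSOLE("[%s] code %d: %s\n", pLayerPrefix, msgCode, pMsg);
    return VK_FALSE; // do not abort the Vulkan call that triggered the report
}

static VkResult example_register_callback(VkInstance instance, VkDebugReportCallbackEXT *pCallback) {
    auto pfnCreate = (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT");
    if (!pfnCreate)
        return VK_ERROR_EXTENSION_NOT_PRESENT;
    VkDebugReportCallbackCreateInfoEXT info = {};
    info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
    info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
    info.pfnCallback = example_debug_callback;
    return pfnCreate(instance, &info, nullptr, pCallback);
}
#endif
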
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    // Return layer-intercepted entry points first; unknown names fall through
    // to the next layer's dispatch table at the bottom of this function.
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkDestroyDevice"))
        return (PFN_vkVoidFunction)vkDestroyDevice;
    if (!strcmp(funcName, "vkQueueSubmit"))
        return (PFN_vkVoidFunction)vkQueueSubmit;
    if (!strcmp(funcName, "vkWaitForFences"))
        return (PFN_vkVoidFunction)vkWaitForFences;
    if (!strcmp(funcName, "vkGetFenceStatus"))
        return (PFN_vkVoidFunction)vkGetFenceStatus;
    if (!strcmp(funcName, "vkQueueWaitIdle"))
        return (PFN_vkVoidFunction)vkQueueWaitIdle;
    if (!strcmp(funcName, "vkDeviceWaitIdle"))
        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
    if (!strcmp(funcName, "vkGetDeviceQueue"))
        return (PFN_vkVoidFunction)vkGetDeviceQueue;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkDestroyFence"))
        return (PFN_vkVoidFunction)vkDestroyFence;
    if (!strcmp(funcName, "vkResetFences"))
        return (PFN_vkVoidFunction)vkResetFences;
    if (!strcmp(funcName, "vkDestroySemaphore"))
        return (PFN_vkVoidFunction)vkDestroySemaphore;
    if (!strcmp(funcName, "vkDestroyEvent"))
        return (PFN_vkVoidFunction)vkDestroyEvent;
    if (!strcmp(funcName, "vkDestroyQueryPool"))
        return (PFN_vkVoidFunction)vkDestroyQueryPool;
    if (!strcmp(funcName, "vkDestroyBuffer"))
        return (PFN_vkVoidFunction)vkDestroyBuffer;
    if (!strcmp(funcName, "vkDestroyBufferView"))
        return (PFN_vkVoidFunction)vkDestroyBufferView;
    if (!strcmp(funcName, "vkDestroyImage"))
        return (PFN_vkVoidFunction)vkDestroyImage;
    if (!strcmp(funcName, "vkDestroyImageView"))
        return (PFN_vkVoidFunction)vkDestroyImageView;
    if (!strcmp(funcName, "vkDestroyShaderModule"))
        return (PFN_vkVoidFunction)vkDestroyShaderModule;
    if (!strcmp(funcName, "vkDestroyPipeline"))
        return (PFN_vkVoidFunction)vkDestroyPipeline;
    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
    if (!strcmp(funcName, "vkDestroySampler"))
        return (PFN_vkVoidFunction)vkDestroySampler;
    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
    if (!strcmp(funcName, "vkDestroyFramebuffer"))
        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
    if (!strcmp(funcName, "vkDestroyRenderPass"))
        return (PFN_vkVoidFunction)vkDestroyRenderPass;
    if (!strcmp(funcName, "vkCreateBuffer"))
        return (PFN_vkVoidFunction)vkCreateBuffer;
    if (!strcmp(funcName, "vkCreateBufferView"))
        return (PFN_vkVoidFunction)vkCreateBufferView;
    if (!strcmp(funcName, "vkCreateImage"))
        return (PFN_vkVoidFunction)vkCreateImage;
    if (!strcmp(funcName, "vkCreateImageView"))
        return (PFN_vkVoidFunction)vkCreateImageView;
    if (!strcmp(funcName, "vkCreateFence"))
        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
    if (!strcmp(funcName, "vkCreateComputePipelines"))
        return (PFN_vkVoidFunction)vkCreateComputePipelines;
    if (!strcmp(funcName, "vkCreateSampler"))
        return (PFN_vkVoidFunction)vkCreateSampler;
    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
    if (!strcmp(funcName, "vkCreatePipelineLayout"))
        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
    if (!strcmp(funcName, "vkCreateDescriptorPool"))
        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
    if (!strcmp(funcName, "vkResetDescriptorPool"))
        return (PFN_vkVoidFunction)vkResetDescriptorPool;
    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
    if (!strcmp(funcName, "vkFreeDescriptorSets"))
        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
    if (!strcmp(funcName, "vkCreateCommandPool"))
        return (PFN_vkVoidFunction)vkCreateCommandPool;
    if (!strcmp(funcName, "vkDestroyCommandPool"))
        return (PFN_vkVoidFunction)vkDestroyCommandPool;
    if (!strcmp(funcName, "vkResetCommandPool"))
        return (PFN_vkVoidFunction)vkResetCommandPool;
    if (!strcmp(funcName, "vkCreateQueryPool"))
        return (PFN_vkVoidFunction)vkCreateQueryPool;
    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
    if (!strcmp(funcName, "vkFreeCommandBuffers"))
        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
    if (!strcmp(funcName, "vkBeginCommandBuffer"))
        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
    if (!strcmp(funcName, "vkEndCommandBuffer"))
        return (PFN_vkVoidFunction)vkEndCommandBuffer;
    if (!strcmp(funcName, "vkResetCommandBuffer"))
        return (PFN_vkVoidFunction)vkResetCommandBuffer;
    if (!strcmp(funcName, "vkCmdBindPipeline"))
        return (PFN_vkVoidFunction)vkCmdBindPipeline;
    if (!strcmp(funcName, "vkCmdSetViewport"))
        return (PFN_vkVoidFunction)vkCmdSetViewport;
    if (!strcmp(funcName, "vkCmdSetScissor"))
        return (PFN_vkVoidFunction)vkCmdSetScissor;
    if (!strcmp(funcName, "vkCmdSetLineWidth"))
        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
    if (!strcmp(funcName, "vkCmdSetDepthBias"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

    if (dev == NULL)
        return NULL;

    layer_data *dev_data;
    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

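// Illustrative sketch (not part of this layer, not compiled): how a caller
// resolves a device entry point through the chain terminated by the function
// above. Names the layer does not intercept fall through to the next layer.
#if 0
static PFN_vkQueuePresentKHR example_resolve_present(VkDevice device) {
    return (PFN_vkQueuePresentKHR)vkGetDeviceProcAddr(device, "vkQueuePresentKHR");
}
#endif
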
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
#endif
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

    if (instance == NULL)
        return NULL;

    PFN_vkVoidFunction fptr;

    layer_data *my_data;
    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}