core_validation.cpp revision c15b801a6e1a5dd5eed09e689aecdde7c4a90a5b
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <string>
#include <iostream>
#include <algorithm>
#include <list>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

#if MTMERGESOURCE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGESOURCE
// MTMERGESOURCE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
// MTMERGESOURCE - End of MT stuff
#endif
    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
// MTMERGESOURCE - added a couple of fields to constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGESOURCE
        currentFenceId(1),
#endif
        device_extensions() {}
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};
template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

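    /* Word 0 of every SPIR-V instruction packs the instruction's word count in the
     * high 16 bits and its opcode in the low 16 bits. */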
    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
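        /* codeSize is specified in bytes; convert to a word count for the copy below */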
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
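    /* A SPIR-V module begins with a five-word header (magic number, version, generator
     * magic, id bound, schema), so the first instruction starts at word 5. */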
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Delete CBInfo from container and clear mem references to CB
static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(my_data, cb);
    // Delete the CBInfo info
    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
    my_data->commandBufferMap.erase(cb);
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swapchains are a special case: use my_data->imageBindingMap, but copy in the
    // SwapchainCreateInfo's usage flags and set the mem value to a unique key. These are used by
    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage = static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo)->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Add a fence, creating one if necessary to our list of fences/fenceIds
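// Fence ids come from a monotonically increasing per-device counter; each queue tracks the
// last id submitted to it and the last id known retired, so any command buffer whose
// fenceId is <= the queue's lastRetiredId is known to have completed.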
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If a fence was provided, record its id and queue, and verify it was submitted unsignaled
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        // Validate that fence is in UNSIGNALED state
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
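//  For example, vkCmdCopyBuffer() requires VK_BUFFER_USAGE_TRANSFER_SRC_BIT on its source
//  buffer; a caller would pass that single bit with strict = VK_TRUE, since the bit must be present.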
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}

// Free bindings related to CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);

    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(cb);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}

// Delete the entire CB list
static void delete_cmd_buf_info_list(layer_data *my_data) {
    for (auto &cb_node : my_data->commandBufferMap) {
        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
    }
    my_data->commandBufferMap.clear();
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if (cmdBufRefCount > 0 || objRefCount > 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    bool skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                bool commandBufferComplete = false;
                skipCall |= checkCBCompleted(dev_data, cb, &commandBufferComplete);
                if (commandBufferComplete) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to VK_NULL_HANDLE
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
//  device is required for error logging, need a dispatchable
//  object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    pMemInfo->objBindings.insert({handle, type});
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                pInfo->objBindings.insert({handle, type});
                // Need to set mem binding for this object
                pObjBindInfo->mem = mem;
            }
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)obj);
        }
    }
}

#endif

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
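        /* OpType* instructions have no result-type operand, so the result <id> is in
         * word 1; the constants, variables and functions below carry a result type
         * first, which pushes their result <id> to word 2. */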
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
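            /* Word 1 of OpEntryPoint is the execution model; the graphics and compute
             * VkShaderStageFlagBits values are defined as (1 << execution model), so
             * the shift below yields the matching stage bit. */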
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
    uint32_t *words = (uint32_t *)pCreateInfo->pCode;
    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);

    /* Just validate that the header makes sense. */
    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
            considering here, OR -- specialize on the fly now.
            */
        return 1;
    }

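    /* OpConstant operands: word 1 = result type, word 2 = result id, word 3 = first
     * word of the literal value, which suffices for the 32-bit integral constants
     * queried here. */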
1178    return value.word(3);
1179}
1180
1181
static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i + 1 < insn.len()) {
                ss << ", ";
            }
        }
        ss << ")"; /* emit the closing paren even for a zero-member struct */
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}


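/* Structural equality of two types across two modules. a_arrayed/b_arrayed indicate that the
 * corresponding stage interface carries an extra outer array level (e.g. per-vertex arrays on
 * tessellation and geometry interfaces) which should be peeled off before comparing. */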
static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
        /* match on element type, count. these all have the same layout. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type and count. This differs from vector & matrix in that the array
         * size is the id of a constant instruction rather than a literal written into OpTypeArray. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are OpenCL-isms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    default:
        /* everything else is just 1.
         * TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
        return 1;
    }
}

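/* Keys used for the interface maps below: (location, component) for stage interfaces,
 * (descriptor set, binding) for resource interfaces. */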
typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

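/* Peel pointers (and, if is_array_of_verts, one outer array level) off a type definition until a
 * struct is found; returns src->end() if the chain does not terminate in an OpTypeStruct. */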
static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}

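/* Collect all interface variables in the given storage class that are listed by the entrypoint,
 * keyed by (location, component). Both loose variables and interface block members end up here. */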
static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;

    for (auto insn : *src) {

        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == sinterface) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupies multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }
}

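/* Collect all Uniform/UniformConstant variables actually reachable from the entrypoint,
 * keyed by (descriptor set, binding). */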
static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
                                                 std::unordered_set<uint32_t> const &accessible_ids,
                                                 std::map<descriptor_slot_t, interface_var> &out) {

    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
         * DecorationDescriptorSet and DecorationBinding.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            auto existing_it = out.find(std::make_pair(set, binding));
            if (existing_it != out.end()) {
                /* conflict within spv image */
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
                        existing_it->first.second);
            }

            interface_var v;
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            v.offset = 0;
            v.is_patch = false;
            out[std::make_pair(set, binding)] = v;
        }
    }
}

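/* Walk the producer's outputs and the consumer's inputs together, both sorted by
 * (location, component), reporting outputs never read, inputs never written, and
 * type or patch-decoration mismatches at matching locations. */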
static bool validate_interface_between_stages(layer_data *my_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              shader_stage_attributes const *consumer_stage) {
    std::map<location_t, interface_var> outputs;
    std::map<location_t, interface_var> inputs;

    bool pass = true;

    collect_interface_by_location(my_data, producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
    collect_interface_by_location(my_data, consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
                        a_first.second, consumer_stage->name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
                        producer_stage->name)) {
                pass = false;
            }
            b_it++;
        } else {
            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
                             producer_stage->arrayed_output && !a_it->second.is_patch,
                             consumer_stage->arrayed_input && !b_it->second.is_patch)) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            if (a_it->second.is_patch != b_it->second.is_patch) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
                            "per-%s in %s stage", a_first.first, a_first.second,
                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}

/* characterizes a SPIR-V type appearing in an interface to a FF stage,
 * for comparison to a VkFormat's characterization above. */
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
    case spv::OpTypeArray:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}

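/* Map a single-bit VkShaderStageFlagBits value to a dense 0-based index. This assumes u_ffs
 * follows the usual ffs() convention of returning the 1-based position of the lowest set bit. */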
static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}

static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) {
    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
     * each binding should be specified only once.
     */
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}

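/* Compare the pipeline's vertex input attribute descriptions against the vertex shader's
 * declared inputs, using the same sorted merge-walk as the stage-to-stage interface check. */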
static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    std::map<location_t, interface_var> inputs;
    bool pass = true;

    collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false);

    /* Build index by location */
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "Vertex attribute at location %d not consumed by VS", a_first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but it is not provided",
                        b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            /* type checking */
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it_a++;
            it_b++;
        }
    }

    return pass;
}

static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
    std::map<location_t, interface_var> outputs;
    bool pass = true;

    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */

    collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false);

    auto it = outputs.begin();
    uint32_t attachment = 0;

    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
     * are currently dense, but the parallel with matching between shader stages is nice.
     */

    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "FS writes to output location %d with no matching attachment", it->first.first)) {
                pass = false;
            }
            it++;
        } else if (it == outputs.end() || it->first.first > attachment) {
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
                pass = false;
            }
            attachment++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
            unsigned att_type = get_format_type(color_formats[attachment]);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
                            string_VkFormat(color_formats[attachment]),
                            describe_type(fs, it->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it++;
            attachment++;
        }
    }

    return pass;
}

/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
 * for example.
 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
 *  - NOT the shader input/output interfaces.
 *
 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
 * converting parts of this to be generated from the machine-readable spec instead.
 */
static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
             * across all kinds of things here that we may not care about. */
            continue;
        }

        /* try to add to the output set */
        if (!ids.insert(id).second) {
            continue; /* if we already saw this id, we don't want to walk it again. */
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (uint32_t i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (uint32_t i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }
}

static bool validate_push_constant_block_against_pipeline(layer_data *my_data,
                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {

            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *pushConstantRanges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}

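/* Check each push constant block reachable from the entrypoint against the pipeline layout's
 * push constant ranges. */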
static bool validate_push_constant_usage(layer_data *my_data,
                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src,
                                                                 src->get_def(def_insn.word(1)), stage);
        }
    }

    return pass;
}

// For given pipelineLayout verify that the setLayout at slot.first
//  has the requested binding at slot.second
static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) {

    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
        return nullptr;

    auto const layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]];

    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
        return nullptr;

    assert(bindingIt->second < layout_node->createInfo.bindingCount);
    return &layout_node->createInfo.pBindings[bindingIt->second];
}

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};

// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
//   to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread

static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return VK_TRUE;
    }
    return VK_FALSE;
}

// Check object status for selected flag state
static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                                DRAW_STATE_ERROR error_code, const char *fail_msg) {
    if (!(pNode->status & status_mask)) {
        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
    }
    return VK_FALSE;
}

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
        return NULL;
    }
    return my_data->pipelineMap[pipeline];
}

// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return VK_TRUE;
        }
    }
    return VK_FALSE;
}

// Validate state stored as flags at time of draw call
static VkBool32 validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe,
                                          VkBool32 indexedDraw) {
    VkBool32 result;
    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
                             "Dynamic viewport state not set for this command buffer");
    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
                              "Dynamic scissor state not set for this command buffer");
    if ((pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
        (pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
    }
    if (pPipe->rsStateCI.depthBiasEnable) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
    }
    if (pPipe->dsStateCI.depthBoundsTestEnable) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
    }
    if (pPipe->dsStateCI.stencilTestEnable) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
    }
    if (indexedDraw) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
    }
    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//   to make sure that format and samples counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // Format and sample count must match
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment && VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true; // both unused is trivially compatible
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment || VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return false; // only one side unused; also avoids indexing the descriptions with VK_ATTACHMENT_UNUSED
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Attachment references are not compatible
    return false;
}


// For given primary and secondary RenderPass objects, verify that they're compatible
static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
                                            string &errorMsg) {
    stringstream errorStr;
    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
        errorMsg = errorStr.str();
        return false;
    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
        errorMsg = errorStr.str();
        return false;
    }
    // Trivial pass case is exact same RP
    if (primaryRP == secondaryRP) {
        return true;
    }
    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        // pResolveAttachments is optional; when absent, treat it as an empty array rather than dereferencing a null pointer
        uint32_t primaryResolveCount = primaryRPCI->pSubpasses[spIndex].pResolveAttachments ? primaryColorCount : 0;
        uint32_t secondaryResolveCount = secondaryRPCI->pSubpasses[spIndex].pResolveAttachments ? secondaryColorCount : 0;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        uint32_t resolveMax = std::max(primaryResolveCount, secondaryResolveCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (cIdx < resolveMax &&
                       !attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryResolveCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryResolveCount, secondaryRPCI->pAttachments)) {
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
        // At most one depth/stencil attachment reference per subpass; a missing one acts as VK_ATTACHMENT_UNUSED
        uint32_t primaryDepthCount = primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1 : 0;
        uint32_t secondaryDepthCount = secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment ? 1 : 0;
        if ((primaryDepthCount || secondaryDepthCount) &&
            !attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, primaryDepthCount,
                                              primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, secondaryDepthCount,
                                              secondaryRPCI->pAttachments)) {
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }
        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

2269// For give SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
2270static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2271                                            const uint32_t layoutIndex, string &errorMsg) {
2272    stringstream errorStr;
2273    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2274    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2275        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2276        errorMsg = errorStr.str();
2277        return false;
2278    }
2279    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2280        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2281                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2282                 << ", but you're attempting to bind set to index " << layoutIndex;
2283        errorMsg = errorStr.str();
2284        return false;
2285    }
2286    // Get the specific setLayout from PipelineLayout that overlaps this set
2287    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2288    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
2289        return true;
2290    }
2291    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
2292    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
2293        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
2294                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
2295                 << " descriptors.";
2296        errorMsg = errorStr.str();
2297        return false; // trivial fail case
2298    }
2299    // Now need to check set against corresponding pipelineLayout to verify compatibility
2300    for (size_t i = 0; i < descriptorCount; ++i) {
2301        // Need to verify that layouts are identically defined
2302        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
2303        //    do we also need to check immutable samplers?
2304        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
2305            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
2306                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
2307                     << "' but corresponding descriptor from pipelineLayout is type '"
2308                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
2309            errorMsg = errorStr.str();
2310            return false;
2311        }
2312        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
2313            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
2314                     << "' but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
2315            errorMsg = errorStr.str();
2316            return false;
2317        }
2318    }
2319    return true;
2320}
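// Example (illustrative): given a pipelineLayout whose setLayout at layoutIndex declares
//   { binding 0 : UNIFORM_BUFFER, VERTEX; binding 1 : COMBINED_IMAGE_SAMPLER, FRAGMENT },
// a set whose layout declares binding 1 as SAMPLED_IMAGE fails the descriptorTypes check
// above, while a set created from the identical layout object returns true via the
// trivial pass case.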
2321
2322// Validate that data for each specialization entry is fully contained within the buffer.
2323static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2324    VkBool32 pass = VK_TRUE;
2325
2326    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2327
2328    if (spec) {
2329        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2330            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2331                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2332                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2333                            "Specialization entry %u (for constant id %u) references memory outside provided "
2334                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2335                            " bytes provided)",
2336                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2337                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2338
2339                    pass = VK_FALSE;
2340                }
2341            }
2342        }
2343    }
2344
2345    return pass;
2346}
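// Example (illustrative): with dataSize = 8, a VkSpecializationMapEntry of
// { constantID = 0, offset = 6, size = 4 } references bytes 6..9 and is flagged above,
// since 6 + 4 > 8.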
2347
2348static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
2349                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2350    auto type = module->get_def(type_id);
2351
2352    descriptor_count = 1;
2353
2354    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2355     * descriptor count for each dimension. */
2356    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2357        if (type.opcode() == spv::OpTypeArray) {
2358            descriptor_count *= get_constant_value(module, type.word(3));
2359            type = module->get_def(type.word(2));
2360        }
2361        } else {
2363        }
2364    }
2365
2366    switch (type.opcode()) {
2367    case spv::OpTypeStruct: {
2368        for (auto insn : *module) {
2369            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2370                if (insn.word(2) == spv::DecorationBlock) {
2371                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2372                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2373                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2374                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2375                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2376                }
2377            }
2378        }
2379
2380        /* Invalid */
2381        return false;
2382    }
2383
2384    case spv::OpTypeSampler:
2385        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2386
2387    case spv::OpTypeSampledImage:
2388        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2389            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2390             * doesn't really have a sampler, and a texel buffer descriptor
2391             * doesn't really provide one. Allow this slight mismatch.
2392             */
2393            auto image_type = module->get_def(type.word(2));
2394            auto dim = image_type.word(3);
2395            auto sampled = image_type.word(7);
2396            return dim == spv::DimBuffer && sampled == 1;
2397        }
2398        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2399
2400    case spv::OpTypeImage: {
2401        /* Many descriptor types can back an image type; which one depends on the
2402         * dimension and on whether the image will be used with a sampler. SPIR-V
2403         * for Vulkan requires that Sampled be 1 or 2, so the decision is never
2404         * left to runtime.
2405         */
2406        auto dim = type.word(3);
2407        auto sampled = type.word(7);
2408
2409        if (dim == spv::DimSubpassData) {
2410            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2411        } else if (dim == spv::DimBuffer) {
2412            if (sampled == 1) {
2413                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2414            } else {
2415                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2416            }
2417        } else if (sampled == 1) {
2418            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2419        } else {
2420            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2421        }
2422    }
2423
2424    /* We shouldn't really see any other junk types -- but if we do, they're
2425     * a mismatch.
2426     */
2427    default:
2428        return false; /* Mismatch */
2429    }
2430}
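// Illustrative GLSL examples of the mapping enforced above (assuming a typical
// GLSL-to-SPIR-V compiler):
//   layout(set=S, binding=B) uniform UBO { ... };       // OpTypeStruct + Block       -> UNIFORM_BUFFER(_DYNAMIC)
//   layout(set=S, binding=B) buffer SSBO { ... };       // OpTypeStruct + BufferBlock -> STORAGE_BUFFER(_DYNAMIC)
//   layout(set=S, binding=B) uniform sampler2D tex;     // OpTypeSampledImage         -> COMBINED_IMAGE_SAMPLER
//   layout(set=S, binding=B) uniform texture2D img;     // OpTypeImage, Sampled=1     -> SAMPLED_IMAGE
//   layout(set=S, binding=B, rgba8) uniform image2D st; // OpTypeImage, Sampled=2     -> STORAGE_IMAGE
// An arrayed declaration such as `uniform sampler2D tex[4];` multiplies descriptor_count by 4.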
2431
2432static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2433    if (!feature) {
2434        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2435                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2436                    "Shader requires VkPhysicalDeviceFeatures::%s, but that feature is "
2437                    "not enabled on the device",
2438                    feature_name)) {
2439            return false;
2440        }
2441    }
2442
2443    return true;
2444}
2445
2446static VkBool32 validate_shader_capabilities(layer_data *my_data, shader_module const *src) {
2448    VkBool32 pass = VK_TRUE;
2449
2450    auto enabledFeatures = &my_data->physDevProperties.features;
2451
2452    for (auto insn : *src) {
2453        if (insn.opcode() == spv::OpCapability) {
2454            switch (insn.word(1)) {
2455            case spv::CapabilityMatrix:
2456            case spv::CapabilityShader:
2457            case spv::CapabilityInputAttachment:
2458            case spv::CapabilitySampled1D:
2459            case spv::CapabilityImage1D:
2460            case spv::CapabilitySampledBuffer:
2461            case spv::CapabilityImageBuffer:
2462            case spv::CapabilityImageQuery:
2463            case spv::CapabilityDerivativeControl:
2464                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2465                break;
2466
2467            case spv::CapabilityGeometry:
2468                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
2469                break;
2470
2471            case spv::CapabilityTessellation:
2472                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
2473                break;
2474
2475            case spv::CapabilityFloat64:
2476                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2477                break;
2478
2479            case spv::CapabilityInt64:
2480                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
2481                break;
2482
2483            case spv::CapabilityTessellationPointSize:
2484            case spv::CapabilityGeometryPointSize:
2485                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2486                                        "shaderTessellationAndGeometryPointSize");
2487                break;
2488
2489            case spv::CapabilityImageGatherExtended:
2490                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2491                break;
2492
2493            case spv::CapabilityStorageImageMultisample:
2494                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2495                break;
2496
2497            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2498                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2499                                        "shaderUniformBufferArrayDynamicIndexing");
2500                break;
2501
2502            case spv::CapabilitySampledImageArrayDynamicIndexing:
2503                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2504                                        "shaderSampledImageArrayDynamicIndexing");
2505                break;
2506
2507            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2508                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2509                                        "shaderStorageBufferArrayDynamicIndexing");
2510                break;
2511
2512            case spv::CapabilityStorageImageArrayDynamicIndexing:
2513                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2514                                        "shaderStorageImageArrayDynamicIndexing");
2515                break;
2516
2517            case spv::CapabilityClipDistance:
2518                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2519                break;
2520
2521            case spv::CapabilityCullDistance:
2522                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2523                break;
2524
2525            case spv::CapabilityImageCubeArray:
2526                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2527                break;
2528
2529            case spv::CapabilitySampleRateShading:
2530                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2531                break;
2532
2533            case spv::CapabilitySparseResidency:
2534                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2535                break;
2536
2537            case spv::CapabilityMinLod:
2538                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2539                break;
2540
2541            case spv::CapabilitySampledCubeArray:
2542                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2543                break;
2544
2545            case spv::CapabilityImageMSArray:
2546                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2547                break;
2548
2549            case spv::CapabilityStorageImageExtendedFormats:
2550                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
2551                                        "shaderStorageImageExtendedFormats");
2552                break;
2553
2554            case spv::CapabilityInterpolationFunction:
2555                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2556                break;
2557
2558            case spv::CapabilityStorageImageReadWithoutFormat:
2559                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2560                                        "shaderStorageImageReadWithoutFormat");
2561                break;
2562
2563            case spv::CapabilityStorageImageWriteWithoutFormat:
2564                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2565                                        "shaderStorageImageWriteWithoutFormat");
2566                break;
2567
2568            case spv::CapabilityMultiViewport:
2569                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
2570                break;
2571
2572            default:
2573                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2574                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2575                            "Shader declares capability %u, which is not supported in Vulkan.",
2576                            insn.word(1)))
2577                    pass = VK_FALSE;
2578                break;
2579            }
2580        }
2581    }
2582
2583    return pass;
2584}
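// Example (illustrative): a GLSL geometry shader compiles to SPIR-V containing
// `OpCapability Geometry`; if VkPhysicalDeviceFeatures::geometryShader was not enabled
// at device creation, the loop above reports SHADER_CHECKER_FEATURE_NOT_ENABLED.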
2585
2586
2587
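// Validate a single shader stage against its module, the pipeline layout, and the enabled
// device features: checks specialization offsets, entrypoint presence, declared capabilities,
// and push constant usage, and verifies that every descriptor the entrypoint statically uses
// is declared in the pipeline layout with a matching type, stage visibility, and count. Also
// records the used (set, binding) pairs into pipeline->active_slots as a side effect.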
2588static VkBool32 validate_pipeline_shader_stage(layer_data *dev_data,
2589                                               VkPipelineShaderStageCreateInfo const *pStage,
2590                                               PIPELINE_NODE *pipeline,
2591                                               PIPELINE_LAYOUT_NODE *pipelineLayout,
2592                                               shader_module **out_module,
2593                                               spirv_inst_iter *out_entrypoint)
2594{
2595    VkBool32 pass = VK_TRUE;
2596    auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get();
2597    pass &= validate_specialization_offsets(dev_data, pStage);
2598
2599    /* find the entrypoint */
2600    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2601    if (entrypoint == module->end()) {
2602        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2603                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2604                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2605                    string_VkShaderStageFlagBits(pStage->stage))) {
2606            pass = VK_FALSE;
2607        }
2608    }
2609
2610    /* validate shader capabilities against enabled device features */
2611    pass &= validate_shader_capabilities(dev_data, module);
2612
2613    /* mark accessible ids */
2614    std::unordered_set<uint32_t> accessible_ids;
2615    mark_accessible_ids(module, entrypoint, accessible_ids);
2616
2617    /* validate descriptor set layout against what the entrypoint actually uses */
2618    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2619    collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses);
2620
2621    /* validate push constant usage */
2622    pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges,
2623                                         module, accessible_ids, pStage->stage);
2624
2625    /* validate descriptor use */
2626    for (auto use : descriptor_uses) {
2627        // While validating shaders capture which slots are used by the pipeline
2628        pipeline->active_slots[use.first.first].insert(use.first.second);
2629
2630        /* find the matching binding */
2631        auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first);
2632        unsigned required_descriptor_count;
2633
2634        if (!binding) {
2635            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2636                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2637                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2638                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2639                pass = VK_FALSE;
2640            }
2641        } else if (~binding->stageFlags & pStage->stage) {
2642            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2643                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2644                        "Shader uses descriptor slot %u.%u (used "
2645                        "as type `%s`) but descriptor not "
2646                        "accessible from stage %s",
2647                        use.first.first, use.first.second,
2648                        describe_type(module, use.second.type_id).c_str(),
2649                        string_VkShaderStageFlagBits(pStage->stage))) {
2650                pass = VK_FALSE;
2651            }
2652        } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) {
2653            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2654                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2655                        "Type mismatch on descriptor slot "
2656                        "%u.%u: shader uses type `%s`, but the "
2657                        "pipeline layout declares descriptor type %s",
2658                        use.first.first, use.first.second,
2659                        describe_type(module, use.second.type_id).c_str(),
2660                        string_VkDescriptorType(binding->descriptorType))) {
2661                pass = VK_FALSE;
2662            }
2663        } else if (binding->descriptorCount < required_descriptor_count) {
2664            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2665                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2666                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2667                        required_descriptor_count, use.first.first, use.first.second,
2668                        describe_type(module, use.second.type_id).c_str(),
2669                        binding->descriptorCount)) {
2670                pass = VK_FALSE;
2671            }
2672        }
2673    }
2674
2675    return pass;
2676}
2677
2678
2679// Validate the shaders used by the given pipeline, and store the descriptor slots
2680//  actually used by the pipeline into pPipeline->active_slots
2681static VkBool32 validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2682    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
2683    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2684    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2685
2686    shader_module *shaders[5];
2687    memset(shaders, 0, sizeof(shaders));
2688    spirv_inst_iter entrypoints[5];
2689    memset(entrypoints, 0, sizeof(entrypoints));
2690    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2691    VkBool32 pass = VK_TRUE;
2692
2693    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2694
2695    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2696        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
2697        auto stage_id = get_shader_stage_id(pStage->stage);
2698        pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout,
2699                                               &shaders[stage_id], &entrypoints[stage_id]);
2700    }
2701
2702    vi = pCreateInfo->pVertexInputState;
2703
2704    if (vi) {
2705        pass &= validate_vi_consistency(my_data, vi);
2706    }
2707
2708    if (shaders[vertex_stage]) {
2709        pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2710    }
2711
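    // Walk the enabled stages in pipeline order, matching each stage's outputs against the
    // inputs of the next enabled stage. E.g. (illustrative) for a VS+GS+FS pipeline with no
    // tessellation, the missing TCS/TES slots are skipped and the interfaces checked are
    // VS->GS and GS->FS.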
2712    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2713    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2714
2715    while (!shaders[producer] && producer != fragment_stage) {
2716        producer++;
2717        consumer++;
2718    }
2719
2720    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2721        assert(shaders[producer]);
2722        if (shaders[consumer]) {
2723            pass &= validate_interface_between_stages(my_data,
2724                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2725                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2726
2727            producer = consumer;
2728        }
2729    }
2730
2731    auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr;
2732
2733    if (shaders[fragment_stage] && rp) {
2734        pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp,
2735                                                       pCreateInfo->subpass);
2736    }
2737
2738    return pass;
2739}
2740
2741static VkBool32 validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2742    VkComputePipelineCreateInfo const *pCreateInfo = &pPipeline->computePipelineCI;
2743
2744    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2745
2746    shader_module *module;
2747    spirv_inst_iter entrypoint;
2748
2749    return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout,
2750                                          &module, &entrypoint);
2751}
2752
2753// Return Set node ptr for specified set or else NULL
2754static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2755    if (my_data->setMap.find(set) == my_data->setMap.end()) {
2756        return NULL;
2757    }
2758    return my_data->setMap[set];
2759}
2760
2761// For given Layout Node and binding, return index where that binding begins
2762static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2763    uint32_t offsetIndex = 0;
2764    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2765        if (pLayout->createInfo.pBindings[i].binding == binding)
2766            break;
2767        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2768    }
2769    return offsetIndex;
2770}
2771
2772// For given layout node and binding, return last index that is updated
2773static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2774    uint32_t offsetIndex = 0;
2775    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2776        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2777        if (pLayout->createInfo.pBindings[i].binding == binding)
2778            break;
2779    }
2780    return offsetIndex - 1;
2781}
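// Example (illustrative): for a layout created with bindings
//   { binding 0 : descriptorCount 2, binding 1 : descriptorCount 3 },
// getBindingStartIndex(layout, 1) == 2 and getBindingEndIndex(layout, 1) == 4, i.e.
// binding 1 occupies flattened indices 2..4. A binding number not present in the layout
// yields the total descriptor count from getBindingStartIndex and total - 1 from
// getBindingEndIndex.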
2782
2783// For the given command buffer, verify and update the state for activeSetBindingsPairs
2784//  This includes:
2785//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2786//     To be valid, the dynamic offset combined with the offset and range from its
2787//     descriptor update must not overflow the size of its buffer being updated
2788//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2789//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2790static VkBool32 validate_and_update_drawtime_descriptor_state(
2791    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2792    const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
2793    VkBool32 result = VK_FALSE;
2794
2795    VkWriteDescriptorSet *pWDS = NULL;
2796    uint32_t dynOffsetIndex = 0;
2797    VkDeviceSize bufferSize = 0;
2798    for (auto set_bindings_pair : activeSetBindingsPairs) {
2799        SET_NODE *set_node = set_bindings_pair.first;
2800        LAYOUT_NODE *layout_node = set_node->pLayout;
2801        for (auto binding : set_bindings_pair.second) {
2802            uint32_t startIdx = getBindingStartIndex(layout_node, binding);
2803            uint32_t endIdx = getBindingEndIndex(layout_node, binding);
2804            for (uint32_t i = startIdx; i <= endIdx; ++i) {
2805                // We did check earlier to verify that set was updated, but now make sure given slot was updated
2806                // TODO : Would be better to store set# that set is bound to so we can report set.binding[index] not updated
2807                if (!set_node->pDescriptorUpdates[i]) {
2808                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2809                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2810                                        DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2811                                        "DS %#" PRIxLEAST64 " bound and active but it never had binding %u updated. It is now being used to draw so "
2812                                                            "this will result in undefined behavior.",
2813                                        reinterpret_cast<const uint64_t &>(set_node->set), binding);
2814                } else {
2815                    switch (set_node->pDescriptorUpdates[i]->sType) {
2816                    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
2817                        pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];
2818                        if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2819                            (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2820                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2821                                bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
2822                                uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
2823                                if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
2824                                    if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
2825                                        result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2826                                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2827                                                          reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2828                                                          DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2829                                                          "VkDescriptorSet (%#" PRIxLEAST64 ") has descriptor index %u with a range of "
2830                                                          "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32 " "
2831                                                          "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
2832                                                          "), which has a size of %#" PRIxLEAST64 ".",
2833                                                          reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
2834                                                          pWDS->pBufferInfo[j].offset,
2835                                                          reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2836                                    }
2837                                } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
2838                                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2839                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2840                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2841                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2842                                                      "VkDescriptorSet (%#" PRIxLEAST64
2843                                                      ") has descriptor index %u with dynamic offset %#" PRIxLEAST32 ". "
2844                                                      "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2845                                                      " from its update, this oversteps its buffer "
2846                                                      "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2847                                                      reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
2848                                                      pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2849                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2850                                }
2851                                dynOffsetIndex++;
2852                            }
2853                        } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2854                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2855                                pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2856                            }
2857                        } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2858                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2859                                assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end());
2860                                pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2861                            }
2862                        } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2863                                   pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2864                            for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2865                                pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2866                            }
2867                        }
2868                        i += pWDS->descriptorCount - 1; // Advance i to the last descriptor written by this update
2869                                                        //  (the ++i at the end of the for loop then moves one past it)
2870                        break;
2871                    default: // Currently only shadowing Write update nodes so shouldn't get here
2872                        assert(0);
2873                        continue;
2874                    }
2875                }
2876            }
2877        }
2878    }
2879    return result;
2880}
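// Example (illustrative) of the dynamic-offset check above: for a *_BUFFER_DYNAMIC
// descriptor written with offset 128 and range 64 into a 256-byte buffer, a dynamic
// offset of 64 is valid (64 + 128 + 64 = 256 <= 256), while 65 oversteps the buffer
// and is flagged.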
2881// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
2882//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
2883//   function can be killed and validate_and_update_draw_state() used instead
2884static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
2885    VkWriteDescriptorSet *pWDS = nullptr;
2886    SET_NODE *pSet = nullptr;
2887    // For the bound descriptor sets, pull off any storage images and buffers
2888    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
2889    //  pipelines
2890    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
2891        // Get the set node
2892        pSet = getSetNode(dev_data, set);
        if (!pSet) continue; // getSetNode() may return NULL for an untracked set; skip rather than dereference it below
2893        // For each update in the set
2894        for (auto pUpdate : pSet->pDescriptorUpdates) {
2895            // If it's a write update to STORAGE type capture image/buffer being updated
2896            if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) {
2897                pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate);
2898                if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2899                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2900                        pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2901                    }
2902                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2903                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2904                        pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2905                    }
2906                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2907                           pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2908                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2909                        pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2910                    }
2911                }
2912            }
2913        }
2914    }
2915}
2916
2917// Validate overall state at the time of a draw call
2918static VkBool32 validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const VkBool32 indexedDraw,
2919                                               const VkPipelineBindPoint bindPoint) {
2920    VkBool32 result = VK_FALSE;
2921    auto const &state = pCB->lastBound[bindPoint];
2922    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2923    // First check flag states
2924    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2925        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2926
2927    // Now complete other state checks
2928    // TODO : Currently only performing the next check if *something* was bound (non-zero last bound).
2929    //  A separate check should determine whether something *should* have been bound, and that
2930    //  check should then gate this one.
2931    if (pPipe) {
2932        if (state.pipelineLayout) {
2933            string errorString;
2934            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2935            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
2936            for (auto setBindingPair : pPipe->active_slots) {
2937                uint32_t setIndex = setBindingPair.first;
2938                // If valid set is not bound throw an error
2939                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2940                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2941                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2942                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
2943                                      (uint64_t)pPipe->pipeline, setIndex);
2944                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
2945                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2946                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2947                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
2948                    result |= log_msg(
2949                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2950                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2951                        "VkDescriptorSet (%#" PRIxLEAST64
2952                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
2953                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
2954                } else { // Valid set is bound and layout compatible, validate that it's updated
2955                    // Pull the set node
2956                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
2957                    // Save vector of all active sets to verify dynamicOffsets below
2958                    // activeSetNodes.push_back(pSet);
2959                    activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
2960                    // Make sure set has been updated
2961                    if (!pSet->pUpdateStructs) {
2962                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2963                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
2964                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2965                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2966                                                              "this will result in undefined behavior.",
2967                                          (uint64_t)pSet->set);
2968                    }
2969                }
2970            }
2971            // For given active slots, verify any dynamic descriptors and record updated images & buffers
2972            result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2973        }
2974        if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
2975            // Verify Vtx binding
2976            if (pPipe->vertexBindingDescriptions.size() > 0) {
2977                for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
2978                    if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2979                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2980                                          __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2981                                          "The Pipeline State Object (%#" PRIxLEAST64
2982                                          ") expects this Command Buffer's vertex binding index " PRINTF_SIZE_T_SPECIFIER
2983                                          " to be bound via vkCmdBindVertexBuffers.",
2984                                          (uint64_t)state.pipeline, i);
2985                    }
2986                }
2987            } else {
2988                if (!pCB->currentDrawData.buffers.empty()) {
2989                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
2990                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2991                                      "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
2992                                      ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
2993                                      (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2994                }
2995            }
2996            // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2997            // Skip check if rasterization is disabled or there is no viewport.
2998            if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
2999                 !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
3000                pPipe->graphicsPipelineCI.pViewportState) {
3001                VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
3002                VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
3003                if (dynViewport) {
3004                    if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
3005                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3006                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3007                                          "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
3008                                          ", but PSO viewportCount is %u. These counts must match.",
3009                                          pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
3010                    }
3011                }
3012                if (dynScissor) {
3013                    if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
3014                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3015                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3016                                          "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
3017                                          ", but PSO scissorCount is %u. These counts must match.",
3018                                          pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
3019                    }
3020                }
3021            }
3022        }
3023    }
3024    return result;
3025}
3026
3027// Verify that create state for a pipeline is valid
3028static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3029                                          int pipelineIndex) {
3030    VkBool32 skipCall = VK_FALSE;
3031
3032    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3033
3034    // If create derivative bit is set, check that we've specified a base
3035    // pipeline correctly, and that the base pipeline was created to allow
3036    // derivatives.
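    // The XOR below enforces "exactly one": e.g. (illustrative) a VK_NULL_HANDLE
    // basePipelineHandle with basePipelineIndex == 2 is valid, as is a non-null handle
    // with index -1, but specifying both (or neither) trips the error.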
3037    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3038        PIPELINE_NODE *pBasePipeline = nullptr;
3039        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3040              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3041            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3042                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3043                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3044        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3045            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3046                skipCall |=
3047                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3048                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3049                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3050            } else {
3051                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3052            }
3053        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3054            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3055        }
3056
3057        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3058            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3059                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3060                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3061        }
3062    }
3063
3064    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3065        if (!my_data->physDevProperties.features.independentBlend) {
3066            if (pPipeline->attachments.size() > 1) {
3067                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3068                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3069                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3070                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3071                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3072                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3073                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3074                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3075                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3076                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3077                        skipCall |=
3078                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3079                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If the independent blend feature is "
3080                            "not enabled, all elements of pAttachments must be identical");
3081                    }
3082                }
3083            }
3084        }
3085        if (!my_data->physDevProperties.features.logicOp &&
3086            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3087            skipCall |=
3088                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3089                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3090                        "Invalid Pipeline CreateInfo: If the logic operations feature is not enabled, logicOpEnable must be VK_FALSE");
3091        }
3092        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3093            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3094             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3095            skipCall |=
3096                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3097                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3098                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3099        }
3100    }
3101
3102    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3103    // produces nonsense errors that confuse users. Other layers should already
3104    // emit errors for renderpass being invalid.
3105    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
3106    if (rp_data != my_data->renderPassMap.end() &&
3107        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
3108        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3109                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3110                                                                           "is out of range for this renderpass (0..%u)",
3111                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
3112    }
3113
3114    if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) {
3115        skipCall = VK_TRUE;
3116    }
3117    // VS is required
3118    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3119        skipCall |=
3120            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3121                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3122    }
3123    // Either both or neither TC/TE shaders should be defined
3124    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3125        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3126        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3127                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3128                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3129    }
3130    // Compute shaders should be specified independent of Gfx shaders
3131    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3132        (pPipeline->active_shaders &
3133         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3134          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3135        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3136                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3137                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3138    }
3139    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3140    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3141    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3142        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3143        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3144                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3145                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3146                                                                           "topology for tessellation pipelines");
3147    }
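    // Note (assumption): the 32 used below is the minimum value the spec guarantees for
    // VkPhysicalDeviceLimits::maxTessellationPatchSize; the actual device limit may be
    // higher, so this is a conservative bound rather than the true per-device check.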
3148    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3149        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3150            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3151                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3152                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3153                                                                               "topology is only valid for tessellation pipelines");
3154        }
3155        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
3156            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3157                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3158                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3159                                                                               "topology used with patchControlPoints value %u."
3160                                                                               " patchControlPoints should be >0 and <=32.",
3161                                pPipeline->tessStateCI.patchControlPoints);
3162        }
3163    }
3164    // Viewport state must be included if rasterization is enabled.
3165    // If the viewport state is included, the viewport and scissor counts should always match.
3166    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3167    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3168        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3169        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3170            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3171                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3172                                                                           "and scissors are dynamic, the PSO must include "
3173                                                                           "viewportCount and scissorCount in pViewportState.");
3174        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3175                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3176            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3177                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3178                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3179                                pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
3180        } else {
3181            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3182            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3183            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3184            if (!dynViewport) {
3185                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3186                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3187                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3188                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3189                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3190                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3191                                        "vkCmdSetViewport().",
3192                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3193                }
3194            }
3195            if (!dynScissor) {
3196                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3197                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3198                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3199                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3200                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3201                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3202                                        "vkCmdSetScissor().",
3203                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3204                }
3205            }
3206        }
3207    }
3208    return skipCall;
3209}
3210
3211// Init the pipeline mapping info based on the linked tree of pipeline create info structures
3212//  Threading note : Calls to this function should be wrapped in a mutex
3213// TODO : this should really just be in the constructor for PIPELINE_NODE
3214static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
3215    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
3216
3217    // First init create info
3218    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
3219
3220    size_t bufferSize = 0;
3221    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
3222    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
3223
3224    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
3225        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
3226
3227        switch (pPSSCI->stage) {
3228        case VK_SHADER_STAGE_VERTEX_BIT:
3229            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3230            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
3231            break;
3232        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
3233            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3234            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3235            break;
3236        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
3237            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3238            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
3239            break;
3240        case VK_SHADER_STAGE_GEOMETRY_BIT:
3241            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3242            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
3243            break;
3244        case VK_SHADER_STAGE_FRAGMENT_BIT:
3245            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3246            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
3247            break;
3248        case VK_SHADER_STAGE_COMPUTE_BIT:
3249            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
3250            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
3251            break;
3252        default:
3253            // TODO : Flag error
3254            break;
3255        }
3256    }
3257    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
3258    if (pCreateInfo->stageCount != 0) {
3259        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
3260        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
3261        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
3262    }
3263    if (pCreateInfo->pVertexInputState != NULL) {
3264        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
3265        // Copy embedded ptrs
3266        pVICI = pCreateInfo->pVertexInputState;
3267        if (pVICI->vertexBindingDescriptionCount) {
3268            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
3269                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
3270        }
3271        if (pVICI->vertexAttributeDescriptionCount) {
3272            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
3273                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
3274        }
3275        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
3276    }
3277    if (pCreateInfo->pInputAssemblyState != NULL) {
3278        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
3279        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
3280    }
3281    if (pCreateInfo->pTessellationState != NULL) {
3282        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
3283        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
3284    }
3285    if (pCreateInfo->pViewportState != NULL) {
3286        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
3287        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
3288    }
3289    if (pCreateInfo->pRasterizationState != NULL) {
3290        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
3291        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
3292    }
3293    if (pCreateInfo->pMultisampleState != NULL) {
3294        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
3295        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
3296    }
3297    if (pCreateInfo->pDepthStencilState != NULL) {
3298        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
3299        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
3300    }
3301    if (pCreateInfo->pColorBlendState != NULL) {
3302        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
3303        // Copy embedded ptrs
3304        pCBCI = pCreateInfo->pColorBlendState;
3305        if (pCBCI->attachmentCount) {
3306            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
3307                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
3308        }
3309        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
3310    }
3311    if (pCreateInfo->pDynamicState != NULL) {
3312        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
3313        if (pPipeline->dynStateCI.dynamicStateCount) {
3314            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
3315            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
3316            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
3317        }
3318        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
3319    }
3320    return pPipeline;
3321}
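
// Illustrative sketch (hypothetical helper, not part of the layer): the spec
// allows an app to free everything reachable from pCreateInfo as soon as
// vkCreateGraphicsPipelines returns, so a plain struct copy would leave
// pStages and the other embedded pointers dangling. That is why the function
// above re-allocates or shadows each array it needs to keep.
static bool exampleShallowCopyAliasesAppMemory(const VkGraphicsPipelineCreateInfo *pCI) {
    VkGraphicsPipelineCreateInfo shallow = *pCI;  // copies only the pointer values
    return shallow.pStages == pCI->pStages;       // true: both still reference app-owned memory
}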
3322
3323// Free the Pipeline nodes
3324static void deletePipelines(layer_data *my_data) {
3325    if (my_data->pipelineMap.empty())
3326        return;
3327    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
3328        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
3329            delete[](*ii).second->graphicsPipelineCI.pStages;
3330        }
3331        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
3332            delete[](*ii).second->dynStateCI.pDynamicStates;
3333        }
3334        delete (*ii).second;
3335    }
3336    my_data->pipelineMap.clear();
3337}
3338
3339// For the given pipeline, return its number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
3340static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
3341    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
3342    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
3343        return pPipe->msStateCI.rasterizationSamples;
3344    }
3345    return VK_SAMPLE_COUNT_1_BIT;
3346}
3347
3348// Validate state related to the PSO
3349static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
3350                                      const VkPipeline pipeline) {
3351    VkBool32 skipCall = VK_FALSE;
3352    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
3353        // Verify that any MSAA request in PSO matches sample# in bound FB
3354        // Skip the check if rasterization is disabled.
3355        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3356        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3357            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3358            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3359            if (pCB->activeRenderPass) {
3360                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3361                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3362                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3363                uint32_t i;
3364
3365                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3366                    VkSampleCountFlagBits samples;
3367
3368                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3369                        continue;
3370
3371                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3372                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3373                        subpassNumSamples = samples;
3374                    } else if (subpassNumSamples != samples) {
3375                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3376                        break;
3377                    }
3378                }
3379                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3380                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3381                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3382                        subpassNumSamples = samples;
3383                    else if (subpassNumSamples != samples)
3384                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3385                }
3386
3387                if ((pSD->colorAttachmentCount > 0 || pSD->pDepthStencilAttachment) &&
3388                    psoNumSamples != subpassNumSamples) {
3389                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3390                                        (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3391                                        "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
3392                                        ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") uses %u samples!",
3393                                        (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3394                }
3395            } else {
3396                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3397                //   Verify and flag error as appropriate
3398            }
3399        }
3400        // TODO : Add more checks here
3401    } else {
3402        // TODO : Validate non-gfx pipeline updates
3403    }
3404    return skipCall;
3405}
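
// App-side sketch (hypothetical helper): to avoid the mismatch flagged above,
// the pipeline's rasterizationSamples must equal the sample count of every
// color and depth/stencil attachment referenced by the active subpass.
static void exampleMatchSubpassSampleCount(VkPipelineMultisampleStateCreateInfo *ms,
                                           const VkAttachmentDescription *subpassAttachment) {
    ms->sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    ms->rasterizationSamples = subpassAttachment->samples; // e.g. VK_SAMPLE_COUNT_4_BIT on both sides
}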
3406
3407// The block of code below is dedicated to managing/tracking descriptor sets (DSs)
3408
3409// Return Pool node ptr for specified pool or else NULL
3410static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3411    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3412        return NULL;
3413    }
3414    return my_data->descriptorPoolMap[pool];
3415}
3416
3417static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
3418    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
3419        return NULL;
3420    }
3421    return my_data->descriptorSetLayoutMap[layout];
3422}
3423
3424// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
3425static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3426    switch (pUpdateStruct->sType) {
3427    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3428    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3429        return VK_FALSE;
3430    default:
3431        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3432                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3433                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3434                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3435    }
3436}
3437
3438// Return the descriptor count for the given update struct
3439// Returns 0 if the struct is not a recognized update type
3440static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3441    switch (pUpdateStruct->sType) {
3442    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3443        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3444    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3445        // TODO : Need to understand this case better and make sure code is correct
3446        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3447    default:
3448        return 0;
3449    }
3451}
3452
3453// For given layout and update, return the first overall index of the layout that is updated
3454static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3455                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3456    return getBindingStartIndex(pLayout, binding) + arrayIndex;
3457}
3458
3459// For given layout and update, return the last overall index of the layout that is updated
3460static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3461                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3462    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3463    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
3464}
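
// Worked example (hypothetical numbers): suppose a layout has binding 0 with
// descriptorCount 2 and binding 1 with descriptorCount 4, so that
// getBindingStartIndex(pLayout, 1) == 2. A write update with dstBinding=1,
// dstArrayElement=1 and descriptorCount=3 then covers overall indices
//   start = 2 + 1 = 3,  end = 2 + 1 + 3 - 1 = 5,
// which still fits inside binding 1's range of overall indices [2, 5].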
3465
3466// Verify that the descriptor type in the update struct matches what's expected by the layout
3467static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
3468                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3469    // First get actual type of update
3470    VkBool32 skipCall = VK_FALSE;
3471    VkDescriptorType actualType;
3472    uint32_t i = 0;
3473    switch (pUpdateStruct->sType) {
3474    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3475        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3476        break;
3477    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3478        /* no need to validate */
3479        return VK_FALSE;
3480        break;
3481    default:
3482        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3483                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3484                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3485                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3486    }
3487    if (VK_FALSE == skipCall) {
3488        // Set first stageFlags as reference and verify that all other updates match it
3489        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
3490        for (i = startIndex; i <= endIndex; i++) {
3491            if (pLayout->descriptorTypes[i] != actualType) {
3492                skipCall |= log_msg(
3493                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3494                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3495                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3496                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
3497            }
3498            if (pLayout->stageFlags[i] != refStageFlags) {
3499                skipCall |= log_msg(
3500                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3501                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
3502                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
3503                    refStageFlags, pLayout->stageFlags[i]);
3504            }
3505        }
3506    }
3507    return skipCall;
3508}
3509
3510// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3511//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
3512// NOTE : Calls to this function should be wrapped in mutex
3513static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3514    VkBool32 skipCall = VK_FALSE;
3515    VkWriteDescriptorSet *pWDS = NULL;
3516    VkCopyDescriptorSet *pCDS = NULL;
3517    switch (pUpdate->sType) {
3518    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3519        pWDS = new VkWriteDescriptorSet;
3520        *pNewNode = (GENERIC_HEADER *)pWDS;
3521        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3522
3523        switch (pWDS->descriptorType) {
3524        case VK_DESCRIPTOR_TYPE_SAMPLER:
3525        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3526        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3527        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3528            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3529            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3530            pWDS->pImageInfo = info;
3531        } break;
3532        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3533        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3534            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3535            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3536            pWDS->pTexelBufferView = info;
3537        } break;
3538        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3539        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3540        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3541        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3542            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3543            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3544            pWDS->pBufferInfo = info;
3545        } break;
3546        default:
3547            // Unexpected descriptorType: flag error (this function returns VkBool32, not VkResult)
3548            return VK_TRUE;
3549        }
3550        break;
3551    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3552        pCDS = new VkCopyDescriptorSet;
3553        *pNewNode = (GENERIC_HEADER *)pCDS;
3554        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3555        break;
3556    default:
3557        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3558                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3559                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3560                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3561            return VK_TRUE;
3562    }
3563    // Make sure that pNext for the end of shadow copy is NULL
3564    (*pNewNode)->pNext = NULL;
3565    return skipCall;
3566}
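
// Illustrative sketch (hypothetical helper, not part of the layer): each
// shadowed write update allocated above owns exactly one of the three info
// arrays, so a matching teardown would mirror the descriptorType switch:
static void exampleFreeShadowedWrite(VkWriteDescriptorSet *pWDS) {
    switch (pWDS->descriptorType) {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        delete[] pWDS->pImageInfo;
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        delete[] pWDS->pTexelBufferView;
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        delete[] pWDS->pBufferInfo;
        break;
    default:
        break; // other types are never shadowed by shadowUpdateNode()
    }
    delete pWDS;
}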
3567
3568// Verify that given sampler is valid
3569static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
3570    VkBool32 skipCall = VK_FALSE;
3571    auto sampIt = my_data->sampleMap.find(*pSampler);
3572    if (sampIt == my_data->sampleMap.end()) {
3573        if (!immutable) {
3574            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3575                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3576                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3577                                (uint64_t)*pSampler);
3578        } else { // immutable
3579            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3580                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3581                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3582                                "sampler %#" PRIxLEAST64,
3583                                (uint64_t)*pSampler);
3584        }
3585    } else {
3586        // TODO : Any further checks we want to do on the sampler?
3587    }
3588    return skipCall;
3589}
3590
3591// TODO: Consolidate the FindLayout/SetLayout overloads below
3592bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3593    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3594    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3595        return false;
3596    }
3597    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3598    imgpair.subresource.aspectMask = aspectMask;
3599    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3600    if (imgsubIt == pCB->imageLayoutMap.end()) {
3601        return false;
3602    }
3603    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3604        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3605                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3606                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3607                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3608    }
3609    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3610        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3611                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3612                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3613                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3614    }
3615    node = imgsubIt->second;
3616    return true;
3617}
3618
3619bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3620    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3621        return false;
3622    }
3623    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3624    imgpair.subresource.aspectMask = aspectMask;
3625    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3626    if (imgsubIt == my_data->imageLayoutMap.end()) {
3627        return false;
3628    }
3629    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3630        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3631                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3632                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3633                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3634    }
3635    layout = imgsubIt->second.layout;
3636    return true;
3637}
3638
3639// find layout(s) on the cmd buf level
3640bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3641    ImageSubresourcePair imgpair = {image, true, range};
3642    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3643    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3644    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3645    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3646    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3647    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3648        imgpair = {image, false, VkImageSubresource()};
3649        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3650        if (imgsubIt == pCB->imageLayoutMap.end())
3651            return false;
3652        node = imgsubIt->second;
3653    }
3654    return true;
3655}
3656
3657// find layout(s) on the global level
3658bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3659    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3660    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3661    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3662    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3663    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3664    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3665        imgpair = {imgpair.image, false, VkImageSubresource()};
3666        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3667        if (imgsubIt == my_data->imageLayoutMap.end())
3668            return false;
3669        layout = imgsubIt->second.layout;
3670    }
3671    return true;
3672}
3673
3674bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3675    ImageSubresourcePair imgpair = {image, true, range};
3676    return FindLayout(my_data, imgpair, layout);
3677}
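
// Usage sketch (hypothetical helper): querying the tracked global layout of a
// single subresource. The per-aspect overloads above are tried in turn, and a
// combined aspect mask only resolves cleanly when all aspects share one layout.
static VkImageLayout exampleQueryGlobalLayout(const layer_data *dev_data, VkImage image) {
    VkImageSubresource sub = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mipLevel*/, 0 /*arrayLayer*/};
    VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
    if (!FindLayout(dev_data, image, sub, layout)) {
        layout = VK_IMAGE_LAYOUT_UNDEFINED; // nothing tracked yet for this subresource
    }
    return layout;
}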
3678
3679bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3680    auto sub_data = my_data->imageSubresourceMap.find(image);
3681    if (sub_data == my_data->imageSubresourceMap.end())
3682        return false;
3683    auto imgIt = my_data->imageMap.find(image);
3684    if (imgIt == my_data->imageMap.end())
3685        return false;
3686    bool ignoreGlobal = false;
3687    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3688    // potential errors in this case.
3689    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3690        ignoreGlobal = true;
3691    }
3692    for (auto imgsubpair : sub_data->second) {
3693        if (ignoreGlobal && !imgsubpair.hasSubresource)
3694            continue;
3695        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3696        if (img_data != my_data->imageLayoutMap.end()) {
3697            layouts.push_back(img_data->second.layout);
3698        }
3699    }
3700    return true;
3701}
3702
3703// Set the layout on the global level
3704void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3705    VkImage &image = imgpair.image;
3706    // TODO (mlentine): Maybe set format if new? Not used atm.
3707    my_data->imageLayoutMap[imgpair].layout = layout;
3708    // TODO (mlentine): Maybe make vector a set?
3709    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3710    if (subresource == my_data->imageSubresourceMap[image].end()) {
3711        my_data->imageSubresourceMap[image].push_back(imgpair);
3712    }
3713}
3714
3715// Set the layout on the cmdbuf level
3716void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3717    pCB->imageLayoutMap[imgpair] = node;
3718    // TODO (mlentine): Maybe make vector a set?
3719    auto subresource =
3720        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3721    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3722        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3723    }
3724}
3725
3726void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3727    // TODO (mlentine): Maybe make vector a set?
3728    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3729        pCB->imageSubresourceMap[imgpair.image].end()) {
3730        pCB->imageLayoutMap[imgpair].layout = layout;
3731    } else {
3732        // TODO (mlentine): Could be expensive and might need to be removed.
3733        assert(imgpair.hasSubresource);
3734        IMAGE_CMD_BUF_LAYOUT_NODE node;
3735        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3736            node.initialLayout = layout;
3737        }
3738        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3739    }
3740}
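
// Illustrative sketch (hypothetical helper): the else-branch above records the
// incoming layout as initialLayout the first time a command buffer touches a
// subresource. At submit time that expectation can be compared against the
// global map, roughly like this:
static bool exampleInitialLayoutMatchesGlobal(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair) {
    IMAGE_CMD_BUF_LAYOUT_NODE node(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node))
        return true; // this command buffer never used the subresource
    VkImageLayout global = VK_IMAGE_LAYOUT_MAX_ENUM;
    if (!FindLayout(dev_data, imgpair, global))
        return true; // no global record to compare against
    return node.initialLayout == global;
}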
3741
3742template <class OBJECT, class LAYOUT>
3743void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3744    if (imgpair.subresource.aspectMask & aspectMask) {
3745        imgpair.subresource.aspectMask = aspectMask;
3746        SetLayout(pObject, imgpair, layout);
3747    }
3748}
3749
3750template <class OBJECT, class LAYOUT>
3751void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3752    ImageSubresourcePair imgpair = {image, true, range};
3753    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3754    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3755    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3756    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3757}
3758
3759template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3760    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3761    SetLayout(pObject, imgpair, layout);
3762}
3763
3764void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3765    auto image_view_data = dev_data->imageViewMap.find(imageView);
3766    assert(image_view_data != dev_data->imageViewMap.end());
3767    const VkImage &image = image_view_data->second.image;
3768    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3769    // TODO: Do not iterate over every possibility - consolidate where possible
3770    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3771        uint32_t level = subRange.baseMipLevel + j;
3772        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3773            uint32_t layer = subRange.baseArrayLayer + k;
3774            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3775            SetLayout(pCB, image, sub, layout);
3776        }
3777    }
3778}
3779
3780// Verify that given imageView is valid
3781static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3782    VkBool32 skipCall = VK_FALSE;
3783    auto ivIt = my_data->imageViewMap.find(*pImageView);
3784    if (ivIt == my_data->imageViewMap.end()) {
3785        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3786                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3787                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3788                            (uint64_t)*pImageView);
3789    } else {
3790        // Validate that imageLayout is compatible with aspectMask and image format
3791        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3792        VkImage image = ivIt->second.image;
3793        // TODO : Check here in case we have a bad image
3794        VkFormat format = VK_FORMAT_MAX_ENUM;
3795        auto imgIt = my_data->imageMap.find(image);
3796        if (imgIt != my_data->imageMap.end()) {
3797            format = (*imgIt).second.createInfo.format;
3798        } else {
3799            // Also need to check the swapchains.
3800            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3801            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3802                VkSwapchainKHR swapchain = swapchainIt->second;
3803                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3804                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3805                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3806                    format = pswapchain_node->createInfo.imageFormat;
3807                }
3808            }
3809        }
3810        if (format == VK_FORMAT_MAX_ENUM) {
3811            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3812                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3813                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3814                                " in imageView %#" PRIxLEAST64,
3815                                (uint64_t)image, (uint64_t)*pImageView);
3816        } else {
3817            VkBool32 ds = vk_format_is_depth_or_stencil(format);
3818            switch (imageLayout) {
3819            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3820                // The COLOR aspect bit must be set
3821                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3822                    skipCall |=
3823                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3824                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3825                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3826                                "and imageView %#" PRIxLEAST64 ""
3827                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3828                                (uint64_t)*pImageView);
3829                }
3830                // format must NOT be DS
3831                if (ds) {
3832                    skipCall |=
3833                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3834                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3835                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3836                                "and imageView %#" PRIxLEAST64 ""
3837                                " but the image format is %s which is not a color format.",
3838                                (uint64_t)*pImageView, string_VkFormat(format));
3839                }
3840                break;
3841            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3842            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3843                // Depth or stencil bit must be set, but both must NOT be set
3844                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3845                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3846                        // Both must NOT be set
3847                        skipCall |=
3848                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3849                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3850                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3851                                    " that has both STENCIL and DEPTH aspects set",
3852                                    (uint64_t)*pImageView);
3853                    }
3854                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3855                    // Neither were set
3856                    skipCall |=
3857                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3858                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3859                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3860                                " that does not have STENCIL or DEPTH aspect set.",
3861                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3862                }
3863                // format must be DS
3864                if (!ds) {
3865                    skipCall |=
3866                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3867                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3868                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3869                                " but the image format is %s which is not a depth/stencil format.",
3870                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3871                }
3872                break;
3873            default:
3874                // anything to check for other layouts?
3875                break;
3876            }
3877        }
3878    }
3879    return skipCall;
3880}
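
// App-side sketch (hypothetical helper): per the checks above, a view bound
// with a depth/stencil layout must select exactly one of the DEPTH or STENCIL
// aspects, never both and never neither:
static void exampleDepthOnlyRange(VkImageSubresourceRange *range) {
    range->aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // not DEPTH | STENCIL
    range->baseMipLevel = 0;
    range->levelCount = 1;
    range->baseArrayLayer = 0;
    range->layerCount = 1;
}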
3881
3882// Verify that given bufferView is valid
3883static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3884    VkBool32 skipCall = VK_FALSE;
3885    auto bvIt = my_data->bufferViewMap.find(*pBufferView);
3886    if (bvIt == my_data->bufferViewMap.end()) {
3887        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3888                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3889                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3890                            (uint64_t)*pBufferView);
3891    } else {
3892        // TODO : Any further checks we want to do on the bufferView?
3893    }
3894    return skipCall;
3895}
3896
3897// Verify that given bufferInfo is valid
3898static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3899    VkBool32 skipCall = VK_FALSE;
3900    auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
3901    if (bufferIt == my_data->bufferMap.end()) {
3902        skipCall |=
3903            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3904                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3905                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3906                    (uint64_t)pBufferInfo->buffer);
3907    } else {
3908        // TODO : Any further checks we want to do on the buffer?
3909    }
3910    return skipCall;
3911}
3912
3913static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3914                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
3915    VkBool32 skipCall = VK_FALSE;
3916    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3917    const VkSampler *pSampler = NULL;
3918    VkBool32 immutable = VK_FALSE;
3919    uint32_t i = 0;
3920    // For given update type, verify that update contents are correct
3921    switch (pWDS->descriptorType) {
3922    case VK_DESCRIPTOR_TYPE_SAMPLER:
3923        for (i = 0; i < pWDS->descriptorCount; ++i) {
3924            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3925        }
3926        break;
3927    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3928        for (i = 0; i < pWDS->descriptorCount; ++i) {
3929            if (NULL == pLayoutBinding->pImmutableSamplers) {
3930                pSampler = &(pWDS->pImageInfo[i].sampler);
3931                if (immutable) {
3932                    skipCall |= log_msg(
3933                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3934                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3935                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3936                        ", but previous update(s) from this "
3937                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3938                        "use immutable or non-immutable samplers.",
3939                        i, (uint64_t)*pSampler);
3940                }
3941            } else {
3942                if (i > 0 && !immutable) {
3943                    skipCall |= log_msg(
3944                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3945                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3946                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3947                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3948                        "use immutable or non-immutable samplers.",
3949                        i);
3950                }
3951                immutable = VK_TRUE;
3952                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
3953            }
3954            skipCall |= validateSampler(my_data, pSampler, immutable);
3955        }
3956    // Intentionally fall through here to also validate image stuff
3957    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3958    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3959    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3960        for (i = 0; i < pWDS->descriptorCount; ++i) {
3961            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3962        }
3963        break;
3964    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3965    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3966        for (i = 0; i < pWDS->descriptorCount; ++i) {
3967            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3968        }
3969        break;
3970    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3971    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3972    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3973    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3974        for (i = 0; i < pWDS->descriptorCount; ++i) {
3975            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3976        }
3977        break;
3978    default:
3979        break;
3980    }
3981    return skipCall;
3982}
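
// App-side sketch (hypothetical handles): for each descriptorType the switch
// above validates one specific info array, so a uniform-buffer write only
// needs pBufferInfo populated:
static VkWriteDescriptorSet exampleUniformBufferWrite(VkDescriptorSet set, const VkDescriptorBufferInfo *bufInfo) {
    VkWriteDescriptorSet write = {};
    write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet = set;
    write.dstBinding = 0;        // must exist in the set's layout
    write.dstArrayElement = 0;
    write.descriptorCount = 1;
    write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    write.pBufferInfo = bufInfo; // checked by validateBufferInfo() above
    return write;
}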
3983// Verify that the given set exists and is not in use by an in-flight CmdBuffer
3984// func_str is the name of the calling function
3985// Return VK_FALSE if no errors occur
3986// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3987VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3988    VkBool32 skip_call = VK_FALSE;
3989    auto set_node = my_data->setMap.find(set);
3990    if (set_node == my_data->setMap.end()) {
3991        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3992                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3993                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3994                             (uint64_t)(set));
3995    } else {
3996        if (set_node->second->in_use.load()) {
3997            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3998                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3999                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
4000                                 func_str.c_str(), (uint64_t)(set));
4001        }
4002    }
4003    return skip_call;
4004}
4005static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
4006    // Flag any CBs this set is bound to as INVALID
4007    for (auto cb : pSet->boundCmdBuffers) {
4008        auto cb_node = dev_data->commandBufferMap.find(cb);
4009        if (cb_node != dev_data->commandBufferMap.end()) {
4010            cb_node->second->state = CB_INVALID;
4011        }
4012    }
4013}
4014// update DS mappings based on write and copy update arrays
4015static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
4016                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
4017    VkBool32 skipCall = VK_FALSE;
4018
4019    LAYOUT_NODE *pLayout = NULL;
4020    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
4021    // Validate Write updates
4022    uint32_t i = 0;
4023    for (i = 0; i < descriptorWriteCount; i++) {
4024        VkDescriptorSet ds = pWDS[i].dstSet;
4025        SET_NODE *pSet = my_data->setMap[ds];
4026        // Set being updated cannot be in-flight
4027        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == VK_TRUE)
4028            return skipCall;
4029        // If set is bound to any cmdBuffers, mark them invalid
4030        invalidateBoundCmdBuffers(my_data, pSet);
4031        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
4032        pLayout = pSet->pLayout;
4033        // First verify valid update struct
4034        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
4035            break;
4036        }
4037        uint32_t binding = 0, endIndex = 0;
4038        binding = pWDS[i].dstBinding;
4039        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
4040        // Make sure that layout being updated has the binding being updated
4041        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
4042            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4043                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4044                                "Descriptor Set %" PRIu64 " does not have a binding to match "
4045                                "update binding %u for update type "
4046                                "%s!",
4047                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
4048        } else {
4049            // Next verify that update falls within size of given binding
4050            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4051            if (getBindingEndIndex(pLayout, binding) < endIndex) {
4052                pLayoutCI = &pLayout->createInfo;
4053                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4054                skipCall |=
4055                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4056                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4057                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4058                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
4059            } else { // TODO : should we skip update on a type mismatch or force it?
4060                uint32_t startIndex;
4061                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4062                // Layout bindings match w/ update, now verify that update type
4063                // & stageFlags are the same for entire update
4064                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
4065                    // The update is within bounds and consistent, but need to
4066                    // make sure contents make sense as well
4067                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
4068                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
4069                        // Update is good. Save the update info
4070                        // Create new update struct for this set's shadow copy
4071                        GENERIC_HEADER *pNewNode = NULL;
4072                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
4073                        if (NULL == pNewNode) {
4074                            skipCall |= log_msg(
4075                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4076                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4077                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
4078                        } else {
4079                            // Insert shadow node into LL of updates for this set
4080                            pNewNode->pNext = pSet->pUpdateStructs;
4081                            pSet->pUpdateStructs = pNewNode;
4082                            // Now update appropriate descriptor(s) to point to new Update node
4083                            for (uint32_t j = startIndex; j <= endIndex; j++) {
4084                                assert(j < pSet->descriptorCount);
4085                                pSet->pDescriptorUpdates[j] = pNewNode;
4086                            }
4087                        }
4088                    }
4089                }
4090            }
4091        }
4092    }
4093    // Now validate copy updates
4094    for (i = 0; i < descriptorCopyCount; ++i) {
4095        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4096        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4097        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4098        // For each copy make sure that update falls within given layout and that types match
4099        pSrcSet = my_data->setMap[pCDS[i].srcSet];
4100        pDstSet = my_data->setMap[pCDS[i].dstSet];
4101        // Set being updated cannot be in-flight
4102        if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == VK_TRUE)
4103            return skipCall;
4104        invalidateBoundCmdBuffers(my_data, pDstSet);
4105        pSrcLayout = pSrcSet->pLayout;
4106        pDstLayout = pDstSet->pLayout;
4107        // Validate that src binding is valid for src set layout
4108        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4109            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4110                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4111                                "Copy descriptor update %u has srcBinding %u "
4112                                "which is out of bounds for underlying SetLayout "
4113                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4114                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                                "Copy descriptor update %u has dstBinding %u "
                                "which is out of bounds for underlying SetLayout "
                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
        } else {
            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
                                            (const GENERIC_HEADER *)&(pCDS[i]));
            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
                                            (const GENERIC_HEADER *)&(pCDS[i]));
            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
                pLayoutCI = &pSrcLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            pCDS[i].srcBinding, DSstr.c_str());
            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
                pLayoutCI = &pDstLayout->createInfo;
                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
                            pCDS[i].dstBinding, DSstr.c_str());
            } else {
                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
                                                    (const GENERIC_HEADER *)&(pCDS[i]));
                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
                                                    (const GENERIC_HEADER *)&(pCDS[i]));
                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
                    // For copy just make sure that the types match and then perform the update
                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
                                            "that does not match overlapping dest descriptor type of %s!",
                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
                    } else {
                        // point dst descriptor at corresponding src descriptor
                        // TODO : This may be a hole. I believe copy should be its own copy,
                        //  otherwise a subsequent write update to src will incorrectly affect the copy
                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
                    }
                }
            }
        }
    }
    return skipCall;
}

// Verify that given pool has descriptors that are being requested for allocation.
// NOTE : Calls to this function should be wrapped in mutex
static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
                                                         const VkDescriptorSetLayout *pSetLayouts) {
    VkBool32 skipCall = VK_FALSE;
    uint32_t i = 0;
    uint32_t j = 0;

    // Track number of descriptorSets allowable in this pool
    if (pPoolNode->availableSets < count) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
                            ". This pool only has %u descriptorSets remaining.",
                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
    } else {
        pPoolNode->availableSets -= count;
    }

    for (i = 0; i < count; ++i) {
        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
        if (NULL == pLayout) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                        (uint64_t)pSetLayouts[i]);
        } else {
            uint32_t typeIndex = 0, poolSizeCount = 0;
            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
                                        ". This pool only has %u descriptors of this type remaining.",
                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
                } else { // Decrement available descriptors of this type
                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
                }
            }
        }
    }
    return skipCall;
}
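
// Example (illustrative only, not part of the layer): requesting more sets
// than the pool has remaining trips the DRAWSTATE_DESCRIPTOR_POOL_EMPTY error
// above. Assuming `pool` was created with maxSets == 1 and `layouts`/`sets`
// are valid application-side arrays:
//     VkDescriptorSetAllocateInfo alloc_info = {};
//     alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
//     alloc_info.descriptorPool = pool;
//     alloc_info.descriptorSetCount = 2; // only 1 set remains in the pool
//     alloc_info.pSetLayouts = layouts;
//     vkAllocateDescriptorSets(device, &alloc_info, sets); // error logged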

// Free the shadowed update node for this Set
// NOTE : Calls to this function should be wrapped in mutex
static void freeShadowUpdateTree(SET_NODE *pSet) {
    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
    pSet->pUpdateStructs = NULL;
    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
    // Clear the descriptor mappings as they will now be invalid
    pSet->pDescriptorUpdates.clear();
    while (pShadowUpdate) {
        pFreeUpdate = pShadowUpdate;
        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
        VkWriteDescriptorSet *pWDS = NULL;
        switch (pFreeUpdate->sType) {
        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
            switch (pWDS->descriptorType) {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
                delete[] pWDS->pImageInfo;
            } break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
                delete[] pWDS->pTexelBufferView;
            } break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                delete[] pWDS->pBufferInfo;
            } break;
            default:
                break;
            }
            break;
        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
            break;
        default:
            assert(0);
            break;
        }
        delete pFreeUpdate;
    }
}
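
// The shadow tree freed above is a singly-linked list of deep-copied update
// structs chained through their pNext members (a sketch of the layout):
//     pSet->pUpdateStructs -> [WRITE_DESCRIPTOR_SET] -> [COPY_DESCRIPTOR_SET] -> NULL
// Each WRITE node owns exactly one of pImageInfo / pTexelBufferView /
// pBufferInfo, selected by descriptorType, which is why only the matching
// array is deleted per node.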

// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        SET_NODE *pSet = (*ii).second->pSets;
        SET_NODE *pFreeSet = pSet;
        while (pSet) {
            pFreeSet = pSet;
            pSet = pSet->pNext;
            // Freeing layouts handled in deleteLayouts() function
            // Free Update shadow struct tree
            freeShadowUpdateTree(pFreeSet);
            delete pFreeSet;
        }
        delete (*ii).second;
    }
    my_data->descriptorPoolMap.clear();
}

// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
// NOTE : Calls to this function should be wrapped in mutex
static void deleteLayouts(layer_data *my_data) {
    if (my_data->descriptorSetLayoutMap.empty())
        return;
    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
        LAYOUT_NODE *pLayout = (*ii).second;
        if (pLayout->createInfo.pBindings) {
            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
            }
            delete[] pLayout->createInfo.pBindings;
        }
        delete pLayout;
    }
    my_data->descriptorSetLayoutMap.clear();
}

// Currently clearing a set is removing all previous updates to that set
//  TODO : Validate if this is correct clearing behavior
static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
    SET_NODE *pSet = getSetNode(my_data, set);
    if (!pSet) {
        // TODO : Return error
    } else {
        freeShadowUpdateTree(pSet);
    }
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
    if (!pPool) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
    } else {
        // TODO: validate flags
        // For every set off of this pool, clear it
        SET_NODE *pSet = pPool->pSets;
        while (pSet) {
            clearDescriptorSet(my_data, pSet->set);
            pSet = pSet->pNext;
        }
        // Reset available count for each type and available sets for this pool
        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
        }
        pPool->availableSets = pPool->maxSets;
    }
}
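
// Example (illustrative only): after a reset, the layer's bookkeeping for the
// pool returns to its creation-time capacities and all of its sets are
// treated as having no updates:
//     vkResetDescriptorPool(device, pool, 0); // flags are currently not validated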

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
    if (my_data->commandBufferMap.count(cb) == 0) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return my_data->commandBufferMap[cb];
}

// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return VK_FALSE;
    VkBool32 skip_call = VK_FALSE;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}
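
// Illustrative consequence of the checks above: once a subpass is begun with
// VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, the only command permitted
// in that subpass is vkCmdExecuteCommands():
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdDraw(cb, 3, 1, 0, 0);                   // flagged by the first check
//     vkCmdExecuteCommands(cb, 1, &secondary_cb);  // allowed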

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    VkBool32 skipCall = VK_FALSE;
    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
    if (pool_data != my_data->commandPoolMap.end()) {
        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // init cmd node and append to end of cmd LL
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skipCall;
}
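
// Worked example of the capability mapping above (illustrative only): for a
// command buffer allocated from a pool whose queue family reports only
// VK_QUEUE_TRANSFER_BIT, recording vkCmdCopyBuffer passes (its CMD has no
// capability check in the switch), while vkCmdDispatch fails checkComputeBit()
// and vkCmdDraw fails checkGraphicsBit().
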
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
    if (pCB) {
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewports.clear();
        pCB->scissors.clear();
        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
                auto set_node = my_data->setMap.find(set);
                if (set_node != my_data->setMap.end()) {
                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
                }
            }
            pCB->lastBound[i].reset();
        }
        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = 0;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->framebuffer = 0;
        pCB->fenceId = 0;
        pCB->lastSubmittedFence = VK_NULL_HANDLE;
        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
        pCB->destroyedSets.clear();
        pCB->updatedSets.clear();
        pCB->destroyedFramebuffers.clear();
        pCB->waitedEvents.clear();
        pCB->semaphores.clear();
        pCB->events.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        pCB->validate_functions.clear();
        pCB->memObjs.clear();
        pCB->eventUpdates.clear();
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
        pCB->status = CBSTATUS_ALL;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
            switch (pPipe->dynStateCI.pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_VIEWPORT:
                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
                break;
            case VK_DYNAMIC_STATE_SCISSOR:
                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
                break;
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
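
// Worked example (illustrative only): binding a PSO whose only dynamic state
// is VK_DYNAMIC_STATE_VIEWPORT ORs (CBSTATUS_ALL & ~CBSTATUS_VIEWPORT_SET)
// into pCB->status; the viewport bit is then supplied by the app's dynamic
// state command:
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pso);
//     vkCmdSetViewport(cb, 0, 1, &viewport); // sets the remaining status bit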

// Print the last bound Gfx Pipeline
static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    VkBool32 skipCall = VK_FALSE;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (!pPipeTrav) {
            // nothing to print
        } else {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
        }
    }
    return skipCall;
}

static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && !pCB->cmds.empty()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
        vector<CMD_NODE> cmds = pCB->cmds;
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cb as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    } else {
        // Nothing to print
    }
}

static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    VkBool32 skipCall = VK_FALSE;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skipCall;
    }
    skipCall |= printPipeline(my_data, cb);
    return skipCall;
}

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    VkBool32 inside = VK_FALSE;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
                         (uint64_t)pCB->activeRenderPass);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    VkBool32 outside = VK_FALSE;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}
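
// Typical use of the two guards above (illustrative only): transfer-style
// commands such as vkCmdCopyBuffer() are checked with insideRenderPass()
// because the spec forbids them inside a render pass, while draw commands are
// checked with outsideRenderPass() because they require an active one.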

static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");

    if (!globalLockInitialized) {
        loader_platform_thread_create_mutex(&globalLock);
        globalLockInitialized = 1;
    }
#if MTMERGESOURCE
    // Zero out memory property data
    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
#endif
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);

    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);

    init_core_validation(my_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyInstance(instance, pAllocator);

    loader_platform_thread_lock_mutex(&globalLock);
    // Clean up logging callback, if any
    while (!my_data->logging_callback.empty()) {
        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
        my_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(my_data->report_data);
    delete my_data->instance_dispatch_table;
    layer_data_map.erase(key);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (layer_data_map.empty()) {
        // Release mutex when destroying last instance.
        loader_platform_thread_delete_mutex(&globalLock);
        globalLockInitialized = 0;
    }
}

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;

    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    loader_platform_thread_lock_mutex(&globalLock);
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    // Setup device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
    uint32_t count;
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->physDevProperties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, my_device_data->physDevProperties.queue_family_properties.data());
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// prototype
static void deleteRenderPasses(layer_data *);
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    loader_platform_thread_lock_mutex(&globalLock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    deletePools(dev_data);
    deleteLayouts(dev_data);
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
#if MTMERGESOURCE
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data, device);
    printCBList(dev_data, device);
    delete_cmd_buf_info_list(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (!dev_data->memObjMap.empty()) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = &(*ii).second;
            if (pInfo->allocInfo.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    // Queues persist until device is destroyed
    delete_queue_info_list(dev_data);
    layer_debug_report_destroy_device(device);
    loader_platform_thread_unlock_mutex(&globalLock);

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: %p, key: %p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    if (VK_FALSE == skipCall) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
}
#endif

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
}

// TODO: Why does this exist - can we just use global?
static const VkLayerProperties cv_device_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName == NULL) {
        dispatch_key key = get_dispatch_key(physicalDevice);
        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
    } else {
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    /* draw_state physical device layers are the same as global */
    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
}

// This validates that the initial layout specified in the command buffer for
// the IMAGE is the same as the global IMAGE layout
VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                            "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
                            "first use is %s.",
                            reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                            string_VkImageLayout(cb_image_data.second.initialLayout));
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}
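
// Example mismatch caught above (illustrative only): command buffer A leaves
// an image in VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, while command buffer B
// was recorded expecting VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL as its
// first use. Submitting A then B without an intervening barrier that restores
// the expected layout reports DRAWSTATE_INVALID_IMAGE_LAYOUT at B's submit.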

// Track which resources are in-flight by atomically incrementing their "in_use" count
VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
    VkBool32 skip_call = VK_FALSE;
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data == my_data->bufferMap.end()) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
            } else {
                buffer_data->second.in_use.fetch_add(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode == my_data->setMap.end()) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
            } else {
                setNode->second->in_use.fetch_add(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode == my_data->semaphoreMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
        } else {
            semaphoreNode->second.in_use.fetch_add(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode == my_data->eventMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
        } else {
            eventNode->second.in_use.fetch_add(1);
        }
    }
    return skip_call;
}
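
// The fetch_add(1) calls above pair with the fetch_sub(1) calls in
// decrementResources() below; an object with a non-zero in_use count is
// considered in flight and should not be destroyed (or, for sets, freed)
// until the fence/queue tracking below retires the submission.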

void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data != my_data->bufferMap.end()) {
                buffer_data->second.in_use.fetch_sub(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode != my_data->setMap.end()) {
                setNode->second->in_use.fetch_sub(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode != my_data->semaphoreMap.end()) {
            semaphoreNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode != my_data->eventMap.end()) {
            eventNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto queryStatePair : pCB->queryToStateMap) {
        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
    }
    for (auto eventStagePair : pCB->eventToStageMap) {
        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
    }
}

void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto fence_data = my_data->fenceMap.find(pFences[i]);
        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
            continue; // skip this fence, but still retire any remaining fences in the array
        fence_data->second.needsSignaled = false;
        fence_data->second.in_use.fetch_sub(1);
        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
            decrementResources(my_data, cmdBuffer);
        }
    }
}
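
// Note on the recursion above: priorFences holds the fences that were still
// outstanding on the queue when this fence was submitted, so signaling one
// fence also retires every earlier submission on that queue, matching
// Vulkan's in-order queue execution guarantee.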

void decrementResources(layer_data *my_data, VkQueue queue) {
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
            decrementResources(my_data, cmdBuffer);
        }
        queue_data->second.untrackedCmdBuffers.clear();
        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
    }
}

void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
    if (queue == other_queue) {
        return;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    auto other_queue_data = dev_data->queueMap.find(other_queue);
    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
        return;
    }
    for (auto fenceInner : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(fenceInner);
    }
    if (fence != VK_NULL_HANDLE) {
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data == dev_data->fenceMap.end()) {
            return;
        }
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            fence_data->second.cmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    } else {
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    }
    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
    }
}

void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (fence != VK_NULL_HANDLE) {
        vector<VkFence> prior_fences;
        auto fence_data = my_data->fenceMap.find(fence);
        if (fence_data == my_data->fenceMap.end()) {
            return;
        }
        // Clear any stale CBs tracked on this fence from a prior submission
        // *before* adopting the queue's untracked CBs, so they aren't lost
        fence_data->second.cmdBuffers.clear();
        if (queue_data != my_data->queueMap.end()) {
            prior_fences = queue_data->second.lastFences;
            queue_data->second.lastFences.clear();
            queue_data->second.lastFences.push_back(fence);
            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
                fence_data->second.cmdBuffers.push_back(cmdbuffer);
            }
            queue_data->second.untrackedCmdBuffers.clear();
        }
        fence_data->second.priorFences = prior_fences;
        fence_data->second.needsSignaled = true;
        fence_data->second.queue = queue;
        fence_data->second.in_use.fetch_add(1);
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
                }
                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
            }
        }
    } else {
        if (queue_data != my_data->queueMap.end()) {
            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
                const VkSubmitInfo *submit = &pSubmits[submit_idx];
                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
                    }
                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
                }
            }
        }
    }
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to both the global set and queue set
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
            }
        }
    }
}
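
// Summary of the bookkeeping above: a fenced submission hangs its command
// buffers (primaries plus any recorded secondaries) off the fence and links
// the queue's previously outstanding fences through priorFences; an unfenced
// submission accumulates in the queue's untrackedCmdBuffers until a later
// fenced submission (or a queue/device wait) retires it.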

bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                    "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}
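
// Example (illustrative only): submitting the same command buffer twice
// without waiting for the first submission to retire, when it was begun
// without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, trips the check above:
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); // still in flight -> flagged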

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skipCall = false;
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            //  flagged if we hit this INVALID cmd buffer case and none of the
            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5223                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
5224                    "should "
5225                    "be improved to report the exact cause.",
5226                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5227            }
5228        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5229            skipCall |=
5230                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5231                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5232                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5233                        (uint64_t)(pCB->commandBuffer));
5234        }
5235    }
5236    return skipCall;
5237}
5238
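// Validate a primary command buffer (and any secondary CBs recorded into it) at submit time:
// increment in-use counts on referenced resources, verify each secondary has not since been
// re-recorded into a different primary, check the ONE_TIME_SUBMIT and simultaneous-use rules,
// and confirm the CB itself is in a submittable state.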
5239static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5240    // Track in-use for resources off of primary and any secondary CBs
5241    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
5242    if (!pCB->secondaryCommandBuffers.empty()) {
5243        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5244            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5245            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5246            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
5247                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5248                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5249                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5250                        " but that buffer has subsequently been bound to "
5251                        "primary cmd buffer %#" PRIxLEAST64 ".",
5252                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5253                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5254            }
5255        }
5256    }
5257    // TODO : Verify if this also needs to be checked for secondary command
5258    //  buffers. If so, this block of code can move to the
5259    //  validateCommandBufferState() function. Vulkan issue GL106 filed to clarify.
5260    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5261        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5262                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5263                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
5264                            "set, but has been submitted %" PRIu64 " times.",
5265                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5266    }
5267    skipCall |= validateCommandBufferState(dev_data, pCB);
5268    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5269    // on device
5270    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5271    return skipCall;
5272}
5273
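// vkQueueSubmit intercept. Under the global lock: tie each submitted CB to the fence and queue,
// run the CB's deferred validation callbacks, walk wait/signal semaphores to check forward
// progress (a wait needs a pending signal; a signal must not already be pending), validate each
// primary CB, and update in-flight tracking before calling down the dispatch chain.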
5274VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5275vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5276    VkBool32 skipCall = VK_FALSE;
5277    GLOBAL_CB_NODE *pCBNode = NULL;
5278    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5279    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5280    loader_platform_thread_lock_mutex(&globalLock);
5281#if MTMERGESOURCE
5282    // TODO : Need to track fence and clear mem references when fence clears
5283    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5284    uint64_t fenceId = 0;
5285    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5286
5287    print_mem_list(dev_data, queue);
5288    printCBList(dev_data, queue);
5289    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5290        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5291        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5292            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5293            if (pCBNode) {
5294                pCBNode->fenceId = fenceId;
5295                pCBNode->lastSubmittedFence = fence;
5296                pCBNode->lastSubmittedQueue = queue;
5297                for (auto &function : pCBNode->validate_functions) {
5298                    skipCall |= function();
5299                }
5300                for (auto &function : pCBNode->eventUpdates) {
5301                    skipCall |= static_cast<VkBool32>(function(queue));
5302                }
5303            }
5304        }
5305
5306        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5307            VkSemaphore sem = submit->pWaitSemaphores[i];
5308
5309            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5310                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
5311                    skipCall |=
5312                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5313                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
5314                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5315                }
5316                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5317            }
5318        }
5319        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5320            VkSemaphore sem = submit->pSignalSemaphores[i];
5321
5322            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5323                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
5324                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5325                                       VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
5326                                       "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5327                }
5328                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5329            }
5330        }
5331    }
5332#endif
5333    // First verify that fence is not in use
5334    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5335        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5336                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5337                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5338    }
5339    // Now verify each individual submit
5340    std::unordered_set<VkQueue> processed_other_queues;
5341    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5342        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5343        vector<VkSemaphore> semaphoreList;
5344        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5345            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5346            if (dev_data->semaphoreMap[semaphore].signaled) {
5347                dev_data->semaphoreMap[semaphore].signaled = 0;
5348                dev_data->semaphoreMap[semaphore].in_use.fetch_sub(1);
5349            } else {
5350                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5351                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5352                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5353                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5354            }
5355            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5356            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5357                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5358                processed_other_queues.insert(other_queue);
5359            }
5360        }
5361        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5362            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5363            semaphoreList.push_back(semaphore);
5364            if (dev_data->semaphoreMap[semaphore].signaled) {
5365                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5366                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5367                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5368                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5369                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5370                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5371            } else {
5372                dev_data->semaphoreMap[semaphore].signaled = 1;
5373                dev_data->semaphoreMap[semaphore].queue = queue;
5374            }
5375        }
5376        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5377            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5378            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5379            pCBNode->semaphores = semaphoreList;
5380            pCBNode->submitCount++; // increment submit count
5381            skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5382        }
5383    }
5384    // Update cmdBuffer-related data structs and mark fence in-use
5385    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5386    loader_platform_thread_unlock_mutex(&globalLock);
5387    if (VK_FALSE == skipCall)
5388        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5389#if MTMERGESOURCE
5390    loader_platform_thread_lock_mutex(&globalLock);
5391    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5392        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5393        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5394            VkSemaphore sem = submit->pWaitSemaphores[i];
5395
5396            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5397                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
5398            }
5399        }
5400    }
5401    loader_platform_thread_unlock_mutex(&globalLock);
5402#endif
5403    return result;
5404}
5405
5406#if MTMERGESOURCE
5407VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5408                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5409    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5410    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5411    // TODO : Track allocations and overall size here
5412    loader_platform_thread_lock_mutex(&globalLock);
5413    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5414    print_mem_list(my_data, device);
5415    loader_platform_thread_unlock_mutex(&globalLock);
5416    return result;
5417}
5418
5419VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5420vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5421    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5422
5423    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5424    // Before freeing a memory object, an application must ensure the memory object is no longer
5425    // in use by the device—for example by command buffers queued for execution. The memory need
5426    // not yet be unbound from all images and buffers, but any further use of those images or
5427    // buffers (on host or device) for anything other than destroying those objects will result in
5428    // undefined behavior.
5429
5430    loader_platform_thread_lock_mutex(&globalLock);
5431    freeMemObjInfo(my_data, device, mem, VK_FALSE);
5432    print_mem_list(my_data, device);
5433    printCBList(my_data, device);
5434    loader_platform_thread_unlock_mutex(&globalLock);
5435    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5436}
5437
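// Validate a vkMapMemory range: warn on zero-size maps, error if the memory object is already
// mapped, and verify that offset (and offset + size, when size is not VK_WHOLE_SIZE) stays
// within the allocation's allocationSize. For example (hypothetical values), mapping offset
// 0x100 with size 0x200 from a 0x280-byte allocation fails, since 0x300 > 0x280.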
5438VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5439    VkBool32 skipCall = VK_FALSE;
5440
5441    if (size == 0) {
5442        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5443        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5444                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5445                           "VkMapMemory: Attempting to map memory range of size zero");
5446    }
5447
5448    auto mem_element = my_data->memObjMap.find(mem);
5449    if (mem_element != my_data->memObjMap.end()) {
5450        // It is an application error to call VkMapMemory on an object that is already mapped
5451        if (mem_element->second.memRange.size != 0) {
5452            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5453                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5454                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5455        }
5456
5457        // Validate that offset + size is within object's allocationSize
5458        if (size == VK_WHOLE_SIZE) {
5459            if (offset >= mem_element->second.allocInfo.allocationSize) {
5460                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5461                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5462                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total allocation size %" PRIu64, offset,
5463                                   mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
5464            }
5465        } else {
5466            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5467                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5468                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5469                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total allocation size %" PRIu64, offset,
5470                                   size + offset, mem_element->second.allocInfo.allocationSize);
5471            }
5472        }
5473    }
5474    return skipCall;
5475}
5476
5477void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5478    auto mem_element = my_data->memObjMap.find(mem);
5479    if (mem_element != my_data->memObjMap.end()) {
5480        MemRange new_range;
5481        new_range.offset = offset;
5482        new_range.size = size;
5483        mem_element->second.memRange = new_range;
5484    }
5485}
5486
5487VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5488    VkBool32 skipCall = VK_FALSE;
5489    auto mem_element = my_data->memObjMap.find(mem);
5490    if (mem_element != my_data->memObjMap.end()) {
5491        if (!mem_element->second.memRange.size) {
5492            // Valid Usage: memory must currently be mapped
5493            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5494                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5495                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5496        }
5497        mem_element->second.memRange.size = 0;
5498        if (mem_element->second.pData) {
5499            free(mem_element->second.pData);
5500            mem_element->second.pData = 0;
5501        }
5502    }
5503    return skipCall;
5504}
5505
5506static char NoncoherentMemoryFillValue = 0xb;
5507
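// For mappable memory that is not HOST_COHERENT, the layer hands the app a pointer into a
// shadow allocation of twice the mapped size, pre-filled with NoncoherentMemoryFillValue.
// The app's region sits in the middle, leaving guard bands of half the mapped size on each
// side so that writes outside the mapped range can be detected later (e.g. at flush time).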
5508void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5509    auto mem_element = my_data->memObjMap.find(mem);
5510    if (mem_element != my_data->memObjMap.end()) {
5511        mem_element->second.pDriverData = *ppData;
5512        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5513        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5514            mem_element->second.pData = 0;
5515        } else {
5516            if (size == VK_WHOLE_SIZE) {
5517                size = mem_element->second.allocInfo.allocationSize;
5518            }
5519            size_t convSize = (size_t)(size);
5520            mem_element->second.pData = malloc(2 * convSize);
5521            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5522            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5523        }
5524    }
5525}
5526#endif
5527// Note: This function assumes that the global lock is held by the calling
5528// thread.
5529VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5530    VkBool32 skip_call = VK_FALSE;
5531    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5532    if (pCB) {
5533        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
5534            for (auto event : queryEventsPair.second) {
5535                if (my_data->eventMap[event].needsSignaled) {
5536                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5537                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5538                                         "Cannot get query results on queryPool %" PRIu64
5539                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
5540                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
5541                }
5542            }
5543        }
5544    }
5545    return skip_call;
5546}
5547// Remove given cmd_buffer from the global inFlight set.
5548//  Also, if the given queue is valid, remove the cmd_buffer from that queue's
5549//  inFlightCmdBuffers set. Finally, check all other queues; if the given cmd_buffer
5550//  is still in flight on another queue, add it back into the global set.
5551// Note: This function assumes that the global lock is held by the calling
5552// thread.
5553static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
5554    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
5555    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5556    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5557        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
5558        for (auto q : dev_data->queues) {
5559            if ((q != queue) &&
5560                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
5561                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
5562                break;
5563            }
5564        }
5565    }
5566}
5567#if MTMERGESOURCE
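// Flag suspicious fence status checks: a fence created pre-signaled will trivially report
// signaled, and a fence that was never submitted on a queue or during acquire-next-image can
// never signal. firstTimeFlag suppresses the report on the first check after creation.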
5568static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
5569    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5570    bool skipCall = false;
5571    auto pFenceInfo = my_data->fenceMap.find(fence);
5572    if (pFenceInfo != my_data->fenceMap.end()) {
5573        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
5574            // Fence was created in the signaled state
5575            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5576                skipCall |=
5577                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5578                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5579                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5580            }
5581            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5582                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5583                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5584                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5585                                    "acquire next image.",
5586                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5587            }
5588        } else {
5589            pFenceInfo->second.firstTimeFlag = VK_FALSE;
5590        }
5591    }
5592    return skipCall;
5593}
5594#endif
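// On a successful wait with waitAll set (or with a single fence), every fence in the list is
// known to have signaled, so the associated command buffers can be retired from the in-flight
// sets and their resource reference counts decremented.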
5595VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5596vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5597    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5598    VkBool32 skip_call = VK_FALSE;
5599#if MTMERGESOURCE
5600    // Verify fence status of submitted fences
5601    loader_platform_thread_lock_mutex(&globalLock);
5602    for (uint32_t i = 0; i < fenceCount; i++) {
5603        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
5604    }
5605    loader_platform_thread_unlock_mutex(&globalLock);
5606    if (skip_call)
5607        return VK_ERROR_VALIDATION_FAILED_EXT;
5608#endif
5609    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5610
5611    if (result == VK_SUCCESS) {
5612        loader_platform_thread_lock_mutex(&globalLock);
5613        // When we know that all fences are complete we can clean/remove their CBs
5614        if (waitAll || fenceCount == 1) {
5615            for (uint32_t i = 0; i < fenceCount; ++i) {
5616#if MTMERGESOURCE
5617                update_fence_tracking(dev_data, pFences[i]);
5618#endif
5619                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
5620                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
5621                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5622                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5623                }
5624            }
5625            decrementResources(dev_data, fenceCount, pFences);
5626        }
5627        // NOTE : Alternate case not handled here is when some fences have completed. In
5628        //  this case for app to guarantee which fences completed it will have to call
5629        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5630        loader_platform_thread_unlock_mutex(&globalLock);
5631    }
5632    if (VK_FALSE != skip_call)
5633        return VK_ERROR_VALIDATION_FAILED_EXT;
5634    return result;
5635}
5636
5637VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5638    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5639    bool skipCall = false;
5640    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5641#if MTMERGESOURCE
5642    loader_platform_thread_lock_mutex(&globalLock);
5643    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
5644    loader_platform_thread_unlock_mutex(&globalLock);
5645    if (skipCall)
5646        return result;
5647#endif
5648    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5649    VkBool32 skip_call = VK_FALSE;
5650    loader_platform_thread_lock_mutex(&globalLock);
5651    if (result == VK_SUCCESS) {
5652#if MTMERGESOURCE
5653        update_fence_tracking(dev_data, fence);
5654#endif
5655        auto fence_queue = dev_data->fenceMap[fence].queue;
5656        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
5657            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5658            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5659        }
5660        decrementResources(dev_data, 1, &fence);
5661    }
5662    loader_platform_thread_unlock_mutex(&globalLock);
5663    if (VK_FALSE != skip_call)
5664        return VK_ERROR_VALIDATION_FAILED_EXT;
5665    return result;
5666}
5667
5668VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5669                                                            VkQueue *pQueue) {
5670    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5671    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5672    loader_platform_thread_lock_mutex(&globalLock);
5673
5674    // Add queue to tracking set only if it is new
5675    auto result = dev_data->queues.emplace(*pQueue);
5676    if (result.second) {
5677        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5678        pQNode->device = device;
5679#if MTMERGESOURCE
5680        pQNode->lastRetiredId = 0;
5681        pQNode->lastSubmittedId = 0;
5682#endif
5683    }
5684
5685    loader_platform_thread_unlock_mutex(&globalLock);
5686}
5687
5688VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
5689    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5690    decrementResources(dev_data, queue);
5691    VkBool32 skip_call = VK_FALSE;
5692    loader_platform_thread_lock_mutex(&globalLock);
5693    // Iterate over a local copy of the set since removeInFlightCmdBuffer() erases members during iteration
5694    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
5695    for (auto cmdBuffer : local_cb_set) {
5696        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5697        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
5698    }
5699    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5700    loader_platform_thread_unlock_mutex(&globalLock);
5701    if (VK_FALSE != skip_call)
5702        return VK_ERROR_VALIDATION_FAILED_EXT;
5703    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5704#if MTMERGESOURCE
5705    if (VK_SUCCESS == result) {
5706        loader_platform_thread_lock_mutex(&globalLock);
5707        retire_queue_fences(dev_data, queue);
5708        loader_platform_thread_unlock_mutex(&globalLock);
5709    }
5710#endif
5711    return result;
5712}
5713
5714VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5715    VkBool32 skip_call = VK_FALSE;
5716    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5717    loader_platform_thread_lock_mutex(&globalLock);
5718    for (auto queue : dev_data->queues) {
5719        decrementResources(dev_data, queue);
5720        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5721            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
5722            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5723        }
5724    }
5725    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5726        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5727    }
5728    dev_data->globalInFlightCmdBuffers.clear();
5729    loader_platform_thread_unlock_mutex(&globalLock);
5730    if (VK_FALSE != skip_call)
5731        return VK_ERROR_VALIDATION_FAILED_EXT;
5732    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5733#if MTMERGESOURCE
5734    if (VK_SUCCESS == result) {
5735        loader_platform_thread_lock_mutex(&globalLock);
5736        retire_device_fences(dev_data, device);
5737        loader_platform_thread_unlock_mutex(&globalLock);
5738    }
5739#endif
5740    return result;
5741}
5742
5743VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5744    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5745    bool skipCall = false;
5746    loader_platform_thread_lock_mutex(&globalLock);
5747    if (dev_data->fenceMap[fence].in_use.load()) {
5748        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5749                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5750                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5751    }
5752#if MTMERGESOURCE
5753    delete_fence_info(dev_data, fence);
5754    auto item = dev_data->fenceMap.find(fence);
5755    if (item != dev_data->fenceMap.end()) {
5756        dev_data->fenceMap.erase(item);
5757    }
5758#endif
5759    loader_platform_thread_unlock_mutex(&globalLock);
5760    if (!skipCall)
5761        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5762}
5763
5764VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5765vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5766    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5767    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5768    loader_platform_thread_lock_mutex(&globalLock);
5769    auto item = dev_data->semaphoreMap.find(semaphore);
5770    if (item != dev_data->semaphoreMap.end()) {
5771        if (item->second.in_use.load()) {
5772            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5773                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5774                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5775        }
5776        dev_data->semaphoreMap.erase(semaphore);
5777    }
5778    loader_platform_thread_unlock_mutex(&globalLock);
5779    // TODO : Clean up any internal data structures using this obj.
5780}
5781
5782VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5783    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5784    bool skip_call = false;
5785    loader_platform_thread_lock_mutex(&globalLock);
5786    auto event_data = dev_data->eventMap.find(event);
5787    if (event_data != dev_data->eventMap.end()) {
5788        if (event_data->second.in_use.load()) {
5789            skip_call |= log_msg(
5790                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
5791                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5792                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5793        }
5794        dev_data->eventMap.erase(event_data);
5795    }
5796    loader_platform_thread_unlock_mutex(&globalLock);
5797    if (!skip_call)
5798        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5799    // TODO : Clean up any internal data structures using this obj.
5800}
5801
5802VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5803vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5804    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5805        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5806    // TODO : Clean up any internal data structures using this obj.
5807}
5808
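// vkGetQueryPoolResults intercept: build a map of queries still attached to in-flight command
// buffers, then classify each requested query as available but in flight, unavailable and in
// flight, unavailable, or uninitialized, and report the combinations whose results cannot
// legally be read back.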
5809VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5810                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5811                                                     VkQueryResultFlags flags) {
5812    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5813    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5814    GLOBAL_CB_NODE *pCB = nullptr;
5815    loader_platform_thread_lock_mutex(&globalLock);
5816    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5817        pCB = getCBNode(dev_data, cmdBuffer);
5818        for (auto queryStatePair : pCB->queryToStateMap) {
5819            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5820        }
5821    }
5822    VkBool32 skip_call = VK_FALSE;
5823    for (uint32_t i = 0; i < queryCount; ++i) {
5824        QueryObject query = {queryPool, firstQuery + i};
5825        auto queryElement = queriesInFlight.find(query);
5826        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5827        if (queryToStateElement != dev_data->queryToStateMap.end()) {
5828            // Available and in flight
5829            if (queryElement != queriesInFlight.end() &&
5830                queryToStateElement->second) {
5831                for (auto cmdBuffer : queryElement->second) {
5832                    pCB = getCBNode(dev_data, cmdBuffer);
5833                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5834                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5835                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5836                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5837                                             "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5838                                             (uint64_t)(queryPool), firstQuery + i);
5839                    } else {
5840                        for (auto event : queryEventElement->second) {
5841                            dev_data->eventMap[event].needsSignaled = true;
5842                        }
5843                    }
5844                }
5845                // Unavailable and in flight
5846            } else if (queryElement != queriesInFlight.end() &&
5847                       !queryToStateElement->second) {
5848                // TODO : Can there be the same query in use by multiple command buffers in flight?
5849                bool make_available = false;
5850                for (auto cmdBuffer : queryElement->second) {
5851                    pCB = getCBNode(dev_data, cmdBuffer);
5852                    make_available |= pCB->queryToStateMap[query];
5853                }
5854                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5855                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5856                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5857                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5858                                         (uint64_t)(queryPool), firstQuery + i);
5859                }
5860                // Unavailable
5861            } else if (!queryToStateElement->second) {
5862                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5863                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5864                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5865                                     (uint64_t)(queryPool), firstQuery + i);
5866            }
5867        } else {
5868            // Uninitialized: no data has ever been recorded for this query index
5869            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5870                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5871                                 "Cannot get query results on queryPool %" PRIu64
5872                                 " with index %d as data has not been collected for this index.",
5873                                 (uint64_t)(queryPool), firstQuery + i);
5874        }
5875    }
5876    loader_platform_thread_unlock_mutex(&globalLock);
5877    if (skip_call)
5878        return VK_ERROR_VALIDATION_FAILED_EXT;
5879    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5880                                                                flags);
5881}
5882
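// A buffer may only be destroyed if this layer knows about it and no in-flight command buffer
// still references it.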
5883VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5884    VkBool32 skip_call = VK_FALSE;
5885    auto buffer_data = my_data->bufferMap.find(buffer);
5886    if (buffer_data == my_data->bufferMap.end()) {
5887        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5888                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5889                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5890    } else {
5891        if (buffer_data->second.in_use.load()) {
5892            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5893                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5894                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5895        }
5896    }
5897    return skip_call;
5898}
5899
5900VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5901vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5902    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5903    VkBool32 skipCall = VK_FALSE;
5904    loader_platform_thread_lock_mutex(&globalLock);
5905#if MTMERGESOURCE
5906    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
5907    if (item != dev_data->bufferBindingMap.end()) {
5908        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5909        dev_data->bufferBindingMap.erase(item);
5910    }
5911#endif
5912    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
5913        loader_platform_thread_unlock_mutex(&globalLock);
5914        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5915        loader_platform_thread_lock_mutex(&globalLock);
5916    }
5917    dev_data->bufferMap.erase(buffer);
5918    loader_platform_thread_unlock_mutex(&globalLock);
5919}
5920
5921VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5922vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5923    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5924    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5925    loader_platform_thread_lock_mutex(&globalLock);
5926    auto item = dev_data->bufferViewMap.find(bufferView);
5927    if (item != dev_data->bufferViewMap.end()) {
5928        dev_data->bufferViewMap.erase(item);
5929    }
5930    loader_platform_thread_unlock_mutex(&globalLock);
5931}
5932
5933VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5934    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5935    VkBool32 skipCall = VK_FALSE;
5936#if MTMERGESOURCE
5937    loader_platform_thread_lock_mutex(&globalLock);
5938    auto item = dev_data->imageBindingMap.find((uint64_t)image);
5939    if (item != dev_data->imageBindingMap.end()) {
5940        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5941        dev_data->imageBindingMap.erase(item);
5942    }
5943    loader_platform_thread_unlock_mutex(&globalLock);
5944#endif
5945    if (VK_FALSE == skipCall)
5946        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5947
5948    loader_platform_thread_lock_mutex(&globalLock);
5949    const auto& entry = dev_data->imageMap.find(image);
5950    if (entry != dev_data->imageMap.end()) {
5951        // Clear the memory object's back-reference to this image
5952        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
5953        if (mem_entry != dev_data->memObjMap.end())
5954            mem_entry->second.image = VK_NULL_HANDLE;
5955
5956        // Remove image from imageMap
5957        dev_data->imageMap.erase(entry);
5958    }
5959    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5960    if (subEntry != dev_data->imageSubresourceMap.end()) {
5961        for (const auto& pair : subEntry->second) {
5962            dev_data->imageLayoutMap.erase(pair);
5963        }
5964        dev_data->imageSubresourceMap.erase(subEntry);
5965    }
5966    loader_platform_thread_unlock_mutex(&globalLock);
5967}
5968#if MTMERGESOURCE
5969VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5970                                  VkDebugReportObjectTypeEXT object_type) {
5971    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5972        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5973                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
5974                       other_handle);
5975    } else {
5976        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, __LINE__,
5977                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
5978                       other_handle);
5979    }
5980}
5981
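// Check a new range against every existing range bound to the same memory object. Offsets are
// masked down to bufferImageGranularity before comparison, so two ranges alias when they touch
// the same granularity-sized block. For illustration, with bufferImageGranularity = 0x400,
// ranges [0x000, 0x3FF] and [0x400, 0x7FF] do not alias, while [0x000, 0x5FF] and
// [0x400, 0x9FF] do.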
5982VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5983                               VkDebugReportObjectTypeEXT object_type) {
5984    VkBool32 skip_call = false;
5985
5986    for (auto range : ranges) {
5987        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
5988            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5989            continue;
5990        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
5991            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5992            continue;
5993        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5994    }
5995    return skip_call;
5996}
5997
5998VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5999                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
6000                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
6001    MEMORY_RANGE range;
6002    range.handle = handle;
6003    range.memory = mem;
6004    range.start = memoryOffset;
6005    range.end = memoryOffset + memRequirements.size - 1;
6006    ranges.push_back(range);
6007    return validate_memory_range(dev_data, other_ranges, range, object_type);
6008}
6009
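// vkBindBufferMemory intercept: record the buffer-to-memory binding, add the buffer's range to
// the memory object's range list and check it for aliasing against bound image ranges, then
// validate memoryOffset against VkMemoryRequirements::alignment and against the device's
// minTexelBufferOffsetAlignment, minUniformBufferOffsetAlignment, and
// minStorageBufferOffsetAlignment limits for the buffer's declared usage. For example
// (hypothetical values), memoryOffset 0x104 against an alignment requirement of 0x100 leaves
// a remainder of 4 and is reported as invalid.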
6010VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6011vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
6012    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6013    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6014    loader_platform_thread_lock_mutex(&globalLock);
6015    // Track objects tied to memory
6016    uint64_t buffer_handle = (uint64_t)(buffer);
6017    VkBool32 skipCall =
6018        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
6019    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
6020    {
6021        VkMemoryRequirements memRequirements;
6022        // MTMTODO : Shouldn't this call down the chain?
6023        vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
6024        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
6025                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
6026                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
6027        // Validate memory requirements alignment
6028        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
6029            skipCall |=
6030                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
6031                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
6032                        "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be an integer multiple of the "
6033                        "VkMemoryRequirements::alignment value %#" PRIxLEAST64
6034                        ", returned from a call to vkGetBufferMemoryRequirements for this buffer",
6035                        memoryOffset, memRequirements.alignment);
6036        }
6037        // Validate device limits alignments
6038        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].create_info->usage;
6039        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
6040            if (vk_safe_modulo(memoryOffset, dev_data->physDevProperties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
6041                skipCall |=
6042                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
6043                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
6044                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
6045                            "device limit minTexelBufferOffsetAlignment %#" PRIxLEAST64,
6046                            memoryOffset, dev_data->physDevProperties.properties.limits.minTexelBufferOffsetAlignment);
6047            }
6048        }
6049        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
6050            if (vk_safe_modulo(memoryOffset, dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6051                skipCall |=
6052                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
6053                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6054                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
6055                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
6056                            memoryOffset, dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
6057            }
6058        }
6059        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
6060            if (vk_safe_modulo(memoryOffset, dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6061                skipCall |=
6062                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
6063                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6064                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
6065                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
6066                            memoryOffset, dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
6067            }
6068        }
6069    }
6070    print_mem_list(dev_data, device);
6071    loader_platform_thread_unlock_mutex(&globalLock);
6072    if (VK_FALSE == skipCall) {
6073        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
6074    }
6075    return result;
6076}
6077
6078VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6079vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
6080    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6081    // TODO : What to track here?
6082    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
6083    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
6084}
6085
6086VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6087vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
6088    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6089    // TODO : What to track here?
6090    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
6091    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
6092}
6093#endif
6094VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6095vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
6096    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6097        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
6098    // TODO : Clean up any internal data structures using this obj.
6099}
6100
6101VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6102vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6103    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6104
6105    loader_platform_thread_lock_mutex(&globalLock);
6106
6107    my_data->shaderModuleMap.erase(shaderModule);
6108
6109    loader_platform_thread_unlock_mutex(&globalLock);
6110
6111    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
6112}
6113
6114VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6115vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6116    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
6117    // TODO : Clean up any internal data structures using this obj.
6118}
6119
6120VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
#if MTMERGESOURCE
        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
#endif
        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
        }
        // Delete CB information structure, and remove from commandBufferMap
        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        if (cb != dev_data->commandBufferMap.end()) {
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb).second->commandBuffer);
            delete (*cb).second;
            dev_data->commandBufferMap.erase(cb);
        }

        // Remove commandBuffer reference from commandPoolMap
        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
    }
#if MTMERGESOURCE
    printCBList(dev_data, device);
#endif
    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skip_call)
        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
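
// Illustrative sketch (not part of this layer): the application-side pattern the
// in-flight check above targets. Handles (`queue`, `fence`, `device`, `pool`, `cb`)
// are assumed to already exist; error handling is omitted for brevity.
//
//     vkQueueSubmit(queue, 1, &submitInfo, fence);
//     // Freeing `cb` here, while `fence` is still unsignaled, would trigger the
//     // DRAWSTATE_INVALID_COMMAND_BUFFER_RESET error above. Wait first:
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkFreeCommandBuffers(device, pool, 1, &cb);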

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
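
// Illustrative sketch (not part of this layer): the createFlags captured above are
// what later lets vkBeginCommandBuffer() decide whether an implicit reset is legal.
// Assumes `device` and `queueFamilyIndex`; error handling omitted.
//
//     VkCommandPoolCreateInfo poolCI = {};
//     poolCI.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     poolCI.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     poolCI.queueFamilyIndex = queueFamilyIndex;
//     VkCommandPool pool;
//     vkCreateCommandPool(device, &poolCI, NULL, &pool);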

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
    VkBool32 skipCall = VK_FALSE;
    auto pool_data = dev_data->commandPoolMap.find(commandPool);
    if (pool_data != dev_data->commandPoolMap.end()) {
        for (auto cmdBuffer : pool_data->second.commandBuffers) {
            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                            "Cannot reset or destroy command pool %" PRIx64 " when allocated command buffer %" PRIx64
                            " is in use.",
                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
            }
        }
    }
    return skipCall;
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool commandBufferComplete = false;
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // Verify that command buffers in pool are complete (not in-flight)
    // MTMTODO : Merge this with code below (separate *NotInUse() call)
    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
        commandBufferComplete = false;
        skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
        }
    }
#endif
    // Verify that no command buffers from this pool are in flight before tearing down
    // internal state; this check must run while the pool's CB list still exists.
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        return;
    }
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
            if (del_cb != dev_data->commandBufferMap.end()) {
                delete (*del_cb).second;                  // delete CB info structure
                dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
            }
            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
                poolCb); // Remove CB reference from commandPoolMap's list
        }
    }
    dev_data->commandPoolMap.erase(commandPool);

    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool commandBufferComplete = false;
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
#if MTMERGESOURCE
    // MTMTODO : Merge this with *NotInUse() call below
    loader_platform_thread_lock_mutex(&globalLock);
    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
    // Verify that CB's in pool are complete (not in-flight)
    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        skipCall |= checkCBCompleted(dev_data, (*it), &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Resetting CB %p before it has completed. You must check CB "
                                "flag before calling vkResetCommandBuffer().",
                                (*it));
        } else {
            // Clear memory references at this point.
            clear_cmd_buf_and_mem_references(dev_data, (*it));
        }
        ++it;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
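
// Illustrative sketch (not part of this layer): resetting a pool is only valid once
// none of its command buffers are pending execution. Assumes `queue`, `device`, `pool`.
//
//     vkQueueWaitIdle(queue); // or wait on the fences used at submit time
//     vkResetCommandPool(device, pool, 0);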

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
        // Reset fence state in fenceCreateInfo structure
        // MTMTODO : Merge with code below
        auto fence_item = dev_data->fenceMap.find(pFences[i]);
        if (fence_item != dev_data->fenceMap.end()) {
            // Validate fences in SIGNALED state
            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "Fence %#" PRIxLEAST64 " submitted to vkResetFences() in UNSIGNALED state",
                                    (uint64_t)pFences[i]);
            } else {
                fence_item->second.createInfo.flags =
                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
            }
        }
#endif
        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
    return result;
}
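
// Illustrative sketch (not part of this layer): the expected wait-then-reset cycle.
// Resetting an already-unsignaled fence is the no-op the MEMTRACK warning above
// flags. Assumes `device` and `fence`.
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // fence now SIGNALED
//     vkResetFences(device, 1, &fence);                        // back to UNSIGNALED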

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
            }
        }
        delete [] fbNode->second.createInfo.pAttachments;
        dev_data->frameBufferMap.erase(fbNode);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->renderPassMap.erase(renderPass);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
#endif
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
        dev_data->bufferMap[*pBuffer].in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
#if MTMERGESOURCE
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
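
// Illustrative sketch (not part of this layer): the usage bits checked above must be
// set at buffer creation time. Assumes `device`; error handling omitted.
//
//     VkBufferCreateInfo bufCI = {};
//     bufCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     bufCI.size = 4096;
//     bufCI.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // required for a buffer view
//     bufCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     VkBuffer buf;
//     vkCreateBuffer(device, &bufCI, NULL, &buf);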

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
#endif
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects globalLock to be held by caller */

    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects globalLock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}
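
// Worked example of the resolution above: for an image created with mipLevels = 10,
// a range of { baseMipLevel = 3, levelCount = VK_REMAINING_MIP_LEVELS } resolves to
// levelCount = 10 - 3 = 7; layer counts resolve the same way from arrayLayers.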

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
        dev_data->imageViewMap[*pView] = localCI;
#if MTMERGESOURCE
        // Validate that the image was created with one of the usage flags accepted for an image view
        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                   VK_FALSE, "vkCreateImageView()",
                                   "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
#if MTMERGESOURCE
        memset(pFN, 0, sizeof(MT_FENCE_INFO));
        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            pFN->firstTimeFlag = VK_TRUE;
        }
#endif
        pFN->in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_NODE *pPipe) {
    // If any enabled attachment uses a constant-color blend factor, flag that blend constants must be set
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
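
// Illustrative sketch (not part of this layer): a blend attachment state that makes
// blendConstantsEnabled true above, which in turn obligates the application to
// provide blend constants (e.g. via vkCmdSetBlendConstants() when they are dynamic).
//
//     VkPipelineColorBlendAttachmentState att = {};
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; // in the checked range
//     att.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR;
//     att.colorBlendOp = VK_BLEND_OP_ADD;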

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    VkBool32 skipCall = VK_FALSE;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);

    for (i = 0; i < count; i++) {
        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                         VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    VkBool32 skipCall = VK_FALSE;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
        // g++ does not like reserve with size 0
        if (pCreateInfo->bindingCount)
            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
        uint32_t totalCount = 0;
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
            // emplace() inserts the mapping; it fails (returns false in .second) if the binding number is a duplicate
            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
                            DRAWSTATE_INVALID_LAYOUT, "DS", "duplicated binding number in "
                                                            "VkDescriptorSetLayoutBinding"))
                    return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            totalCount += pCreateInfo->pBindings[i].descriptorCount;
            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
            }
        }
        pNewNode->layout = *pSetLayout;
        pNewNode->startIndex = 0;
        if (totalCount > 0) {
            pNewNode->descriptorTypes.resize(totalCount);
            pNewNode->stageFlags.resize(totalCount);
            uint32_t offset = 0;
            uint32_t j = 0;
            VkDescriptorType dType;
            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
                dType = pCreateInfo->pBindings[i].descriptorType;
                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
                    pNewNode->descriptorTypes[offset + j] = dType;
                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        pNewNode->dynamicDescriptorCount++;
                    }
                }
                offset += j;
            }
            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
        } else { // no descriptors
            pNewNode->endIndex = 0;
        }
        // Add the new layout node to the global layout map
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
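
// Illustrative sketch (not part of this layer): two bindings sharing binding number 0
// would trip the DRAWSTATE_INVALID_LAYOUT duplicate-binding check above.
//
//     VkDescriptorSetLayoutBinding bindings[2] = {};
//     bindings[0].binding = 0;
//     bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     bindings[0].descriptorCount = 1;
//     bindings[0].stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
//     bindings[1] = bindings[0]; // duplicate binding 0 -> validation error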

static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                     const char *caller_name) {
    bool skipCall = false;
    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                 "exceeds this device's maxPushConstantsSize of %u.",
                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
    }
    return skipCall;
}
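
// Worked example of the check above: with a typical maxPushConstantsSize of 128 bytes,
// a range of { offset = 64, size = 80 } fails because 64 + 80 = 144 > 128, while
// { offset = 64, size = 64 } passes exactly (64 + 64 = 128 <= 128).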

VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        i, pCreateInfo->pPushConstantRanges[i].size);
        }
        // TODO : Add warning if ranges overlap
    }
    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // TODOSC : Merge capture of the setLayouts per pipeline
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
        }
        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                       VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Record the new pool in the global descriptor pool map
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            loader_platform_thread_lock_mutex(&globalLock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
            loader_platform_thread_unlock_mutex(&globalLock);
        }
    } else {
        // TODO : Need to do anything if pool create fails?
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                // Create new set node and add to head of pool nodes
                SET_NODE *pNewNode = new SET_NODE;
                if (NULL == pNewNode) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
                        loader_platform_thread_unlock_mutex(&globalLock);
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
                    // Insert set at head of Set LL for this pool
                    pNewNode->pNext = pPoolNode->pSets;
                    pNewNode->in_use.store(0);
                    pPoolNode->pSets = pNewNode;
                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
                    if (NULL == pLayout) {
                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                                    "Unable to find set layout node for layout %#" PRIxLEAST64
                                    " specified in vkAllocateDescriptorSets() call",
                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
                            loader_platform_thread_unlock_mutex(&globalLock);
                            return VK_ERROR_VALIDATION_FAILED_EXT;
                        }
                    }
                    pNewNode->pLayout = pLayout;
                    pNewNode->pool = pAllocateInfo->descriptorPool;
                    pNewNode->set = pDescriptorSets[i];
                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
                    if (pNewNode->descriptorCount) {
                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
                    }
                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result && pPoolNode) {
        loader_platform_thread_lock_mutex(&globalLock);

        // Update available descriptor sets in pool
        pPoolNode->availableSets += count;

        // For each freed descriptor add it back into the pool as available
        for (uint32_t i = 0; i < count; ++i) {
            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
            invalidateBoundCmdBuffers(dev_data, pSet);
            LAYOUT_NODE *pLayout = pSet->pLayout;
            uint32_t typeIndex = 0, poolSizeCount = 0;
            for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    // TODO : Any other clean-up or book-keeping to do here?
    return result;
}
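
// Illustrative sketch (not part of this layer): freeing individual sets requires the
// pool to have been created with the flag the check above enforces. `device`,
// `poolSize` (a filled VkDescriptorPoolSize), and `set` are assumptions here.
//
//     VkDescriptorPoolCreateInfo poolCI = {};
//     poolCI.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
//     poolCI.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
//     poolCI.maxSets = 8;
//     poolCI.poolSizeCount = 1;
//     poolCI.pPoolSizes = &poolSize;
//     VkDescriptorPool pool;
//     vkCreateDescriptorPool(device, &poolCI, NULL, &pool);
//     // ... allocate and use `set` ...
//     vkFreeDescriptorSets(device, pool, 1, &set); // legal only with the flag above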

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // dsUpdate will return VK_TRUE only if a bailout error occurs, so we want to call down tree when update returns VK_FALSE
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!rtn) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto const &cp_it = dev_data->commandPoolMap.find(pAllocateInfo->commandPool);
        if (cp_it != dev_data->commandPoolMap.end()) {
            for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pAllocateInfo;
                pCB->device = device;
            }
        }
#if MTMERGESOURCE
        printCBList(dev_data, device);
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
7011
7012VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7013vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
7014    VkBool32 skipCall = VK_FALSE;
7015    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7016    loader_platform_thread_lock_mutex(&globalLock);
7017    // Validate command buffer level
7018    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7019    if (pCB) {
7020#if MTMERGESOURCE
7021        bool commandBufferComplete = false;
7022        // MTMTODO : Merge this with code below
7023        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
7024        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7025
7026        if (!commandBufferComplete) {
7027            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7028                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7029                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
7030                                "You must check CB flag before this call.",
7031                                commandBuffer);
7032        }
7033#endif
7034        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
7035            // Secondary Command Buffer
7036            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
7037            if (!pInfo) {
7038                skipCall |=
7039                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7040                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7041                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
7042                            reinterpret_cast<void *>(commandBuffer));
7043            } else {
7044                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
7045                    if (!pInfo->renderPass) { // renderpass should NOT be null for an Secondary CB
7046                        skipCall |= log_msg(
7047                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7048                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7049                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
7050                            reinterpret_cast<void *>(commandBuffer));
7051                    }
7052                    if (!pInfo->framebuffer) { // framebuffer may be null for an Secondary CB, but this affects perf
7053                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7054                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7055                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
7056                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
7057                                                  "valid framebuffer parameter is specified.",
7058                                            reinterpret_cast<void *>(commandBuffer));
7059                    } else {
7060                        string errorString = "";
7061                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
7062                        if (fbNode != dev_data->frameBufferMap.end()) {
7063                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
7064                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
7065                                // renderPass that framebuffer was created with
7066                                // must
7067                                // be compatible with local renderPass
7068                                skipCall |=
7069                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7070                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7071                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
7072                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
7073                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
7074                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
7075                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
7076                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
7077                            }
7078                            // Connect this framebuffer to this cmdBuffer
7079                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
7080                        }
7081                    }
7082                }
7083                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
7084                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
7085                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7086                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7087                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7088                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7089                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
7090                                        "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
7091                                        "support precise occlusion queries.",
7092                                        reinterpret_cast<void *>(commandBuffer));
7093                }
7094            }
7095            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
7096                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
7097                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
7098                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
7099                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7100                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7101                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7102                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must has a subpass index (%d) "
7103                                            "that is less than the number of subpasses (%d).",
7104                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
7105                    }
7106                }
7107            }
7108        }
7109        if (CB_RECORDING == pCB->state) {
7110            skipCall |=
7111                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7112                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7113                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
7114                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7115                        (uint64_t)commandBuffer);
7116        } else if (CB_RECORDED == pCB->state) {
7117            VkCommandPool cmdPool = pCB->createInfo.commandPool;
7118            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7119                skipCall |=
7120                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7121                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7122                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
7123                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
7124                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7125                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7126            }
7127            resetCB(dev_data, commandBuffer);
7128        }
7129        // Set updated state here in case implicit reset occurs above
7130        pCB->state = CB_RECORDING;
7131        pCB->beginInfo = *pBeginInfo;
7132        if (pCB->beginInfo.pInheritanceInfo) {
7133            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
7134            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
7135        }
7136    } else {
7137        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7138                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7139                            "In vkBeginCommandBuffer(): unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
7140    }
7141    loader_platform_thread_unlock_mutex(&globalLock);
7142    if (VK_FALSE != skipCall) {
7143        return VK_ERROR_VALIDATION_FAILED_EXT;
7144    }
7145    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
7146#if MTMERGESOURCE
7147    loader_platform_thread_lock_mutex(&globalLock);
7148    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7149    loader_platform_thread_unlock_mutex(&globalLock);
7150#endif
7151    return result;
7152}
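
// Illustrative sketch (application-side, not part of this layer; handle names
// are assumed): a secondary command buffer begin that satisfies the checks in
// vkBeginCommandBuffer() above.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = renderPass;   // must be compatible with framebuffer's renderPass
//     inherit.subpass = 0;               // must be < the renderPass' subpassCount
//     inherit.framebuffer = framebuffer; // optional; validated against renderPass when set
//     inherit.occlusionQueryEnable = VK_FALSE;
//     inherit.queryFlags = 0;            // PRECISE_BIT needs occlusionQueryEnable + device feature
//
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCmdBuffer, &begin);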
7153
7154VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7155    VkBool32 skipCall = VK_FALSE;
7156    VkResult result = VK_SUCCESS;
7157    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7158    loader_platform_thread_lock_mutex(&globalLock);
7159    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7160    if (pCB) {
7161        if (pCB->state != CB_RECORDING) {
7162            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7163        }
7164        for (auto query : pCB->activeQueries) {
7165            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7166                                DRAWSTATE_INVALID_QUERY, "DS",
7167                                "Ending command buffer with in-progress query: queryPool %" PRIu64 ", index %u",
7168                                (uint64_t)(query.pool), query.index);
7169        }
7170    }
7171    if (VK_FALSE == skipCall) {
7172        loader_platform_thread_unlock_mutex(&globalLock);
7173        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7174        loader_platform_thread_lock_mutex(&globalLock);
7175        if (VK_SUCCESS == result) {
7176            pCB->state = CB_RECORDED;
7177            // Reset CB status flags
7178            pCB->status = 0;
7179            printCB(dev_data, commandBuffer);
7180        }
7181    } else {
7182        result = VK_ERROR_VALIDATION_FAILED_EXT;
7183    }
7184    loader_platform_thread_unlock_mutex(&globalLock);
7185    return result;
7186}
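
// Illustrative note: the active-query check above fires when a query begun in
// this command buffer is never ended. Assuming application-side handles, the
// usual pattern is:
//
//     vkCmdBeginQuery(cmd, queryPool, 0 /*query*/, 0 /*flags*/);
//     // ... draws ...
//     vkCmdEndQuery(cmd, queryPool, 0);
//     vkEndCommandBuffer(cmd);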
7187
7188VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7189vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7190    VkBool32 skipCall = VK_FALSE;
7191    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7192    loader_platform_thread_lock_mutex(&globalLock);
7193#if MTMERGESOURCE
7194    bool commandBufferComplete = false;
7195    // Verify that CB is complete (not in-flight)
7196    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7197    if (!commandBufferComplete) {
7198        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7199                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7200                            "Resetting CB %p before it has completed. You must verify that the CB "
7201                            "has finished executing before calling vkResetCommandBuffer().",
7202                            commandBuffer);
7203    }
7204    // Clear memory references at this point.
7205    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7206#endif
7207    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7208    VkCommandPool cmdPool = pCB->createInfo.commandPool;
7209    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7210        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7211                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7212                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
7213                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7214                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7215    }
7216    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7217        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7218                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7219                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7220                            reinterpret_cast<uint64_t>(commandBuffer));
7221    }
7222    loader_platform_thread_unlock_mutex(&globalLock);
7223    if (skipCall != VK_FALSE)
7224        return VK_ERROR_VALIDATION_FAILED_EXT;
7225    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7226    if (VK_SUCCESS == result) {
7227        loader_platform_thread_lock_mutex(&globalLock);
7228        resetCB(dev_data, commandBuffer);
7229        loader_platform_thread_unlock_mutex(&globalLock);
7230    }
7231    return result;
7232}
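
// Illustrative sketch (assumed application-side handles): an explicit reset is
// only legal when the pool was created with the per-buffer reset bit and the
// command buffer is no longer in flight.
//
//     VkCommandPoolCreateInfo poolInfo = {};
//     poolInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     poolInfo.queueFamilyIndex = queueFamilyIndex;
//     vkCreateCommandPool(device, &poolInfo, NULL, &cmdPool);
//     // ... allocate cmdBuffer from cmdPool, record, submit, wait ...
//     vkResetCommandBuffer(cmdBuffer, 0); // OK: pool allows per-buffer reset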
7233#if MTMERGESOURCE
7234// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7235//    need to account for that mem now having binding to given commandBuffer
7236#endif
7237VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7238vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7239    VkBool32 skipCall = VK_FALSE;
7240    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7241    loader_platform_thread_lock_mutex(&globalLock);
7242    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7243    if (pCB) {
7244        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7245        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7246            skipCall |=
7247                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7248                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7249                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7250                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7251        }
7252
7253        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7254        if (pPN) {
7255            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7256            set_cb_pso_status(pCB, pPN);
7257            set_pipeline_state(pPN);
7258            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7259        } else {
7260            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7261                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7262                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7263        }
7264    }
7265    loader_platform_thread_unlock_mutex(&globalLock);
7266    if (VK_FALSE == skipCall)
7267        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7268}
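
// Illustrative note: binding a compute pipeline while a render pass is active
// triggers DRAWSTATE_INVALID_RENDERPASS_CMD above. Assuming application-side
// handles, bind compute pipelines outside the render pass:
//
//     vkCmdEndRenderPass(cmd);
//     vkCmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline); // OK here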
7269
7270VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7271vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7272    VkBool32 skipCall = VK_FALSE;
7273    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7274    loader_platform_thread_lock_mutex(&globalLock);
7275    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7276    if (pCB) {
7277        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7278        pCB->status |= CBSTATUS_VIEWPORT_SET;
7279        pCB->viewports.resize(viewportCount);
7280        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
7281    }
7282    loader_platform_thread_unlock_mutex(&globalLock);
7283    if (VK_FALSE == skipCall)
7284        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7285}
7286
7287VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7288vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7289    VkBool32 skipCall = VK_FALSE;
7290    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7291    loader_platform_thread_lock_mutex(&globalLock);
7292    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7293    if (pCB) {
7294        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7295        pCB->status |= CBSTATUS_SCISSOR_SET;
7296        pCB->scissors.resize(scissorCount);
7297        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
7298    }
7299    loader_platform_thread_unlock_mutex(&globalLock);
7300    if (VK_FALSE == skipCall)
7301        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7302}
7303
7304VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7305    VkBool32 skipCall = VK_FALSE;
7306    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7307    loader_platform_thread_lock_mutex(&globalLock);
7308    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7309    if (pCB) {
7310        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7311        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7312    }
7313    loader_platform_thread_unlock_mutex(&globalLock);
7314    if (VK_FALSE == skipCall)
7315        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7316}
7317
7318VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7319vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7320    VkBool32 skipCall = VK_FALSE;
7321    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7322    loader_platform_thread_lock_mutex(&globalLock);
7323    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7324    if (pCB) {
7325        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7326        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7327    }
7328    loader_platform_thread_unlock_mutex(&globalLock);
7329    if (VK_FALSE == skipCall)
7330        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7331                                                         depthBiasSlopeFactor);
7332}
7333
7334VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7335    VkBool32 skipCall = VK_FALSE;
7336    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7337    loader_platform_thread_lock_mutex(&globalLock);
7338    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7339    if (pCB) {
7340        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7341        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7342    }
7343    loader_platform_thread_unlock_mutex(&globalLock);
7344    if (VK_FALSE == skipCall)
7345        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7346}
7347
7348VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7349vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7350    VkBool32 skipCall = VK_FALSE;
7351    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7352    loader_platform_thread_lock_mutex(&globalLock);
7353    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7354    if (pCB) {
7355        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7356        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7357    }
7358    loader_platform_thread_unlock_mutex(&globalLock);
7359    if (VK_FALSE == skipCall)
7360        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7361}
7362
7363VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7364vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7365    VkBool32 skipCall = VK_FALSE;
7366    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7367    loader_platform_thread_lock_mutex(&globalLock);
7368    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7369    if (pCB) {
7370        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7371        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7372    }
7373    loader_platform_thread_unlock_mutex(&globalLock);
7374    if (VK_FALSE == skipCall)
7375        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7376}
7377
7378VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7379vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7380    VkBool32 skipCall = VK_FALSE;
7381    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7382    loader_platform_thread_lock_mutex(&globalLock);
7383    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7384    if (pCB) {
7385        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7386        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7387    }
7388    loader_platform_thread_unlock_mutex(&globalLock);
7389    if (VK_FALSE == skipCall)
7390        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7391}
7392
7393VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7394vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7395    VkBool32 skipCall = VK_FALSE;
7396    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7397    loader_platform_thread_lock_mutex(&globalLock);
7398    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7399    if (pCB) {
7400        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7401        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7402    }
7403    loader_platform_thread_unlock_mutex(&globalLock);
7404    if (VK_FALSE == skipCall)
7405        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7406}
7407
7408VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7409vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7410                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7411                        const uint32_t *pDynamicOffsets) {
7412    VkBool32 skipCall = VK_FALSE;
7413    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7414    loader_platform_thread_lock_mutex(&globalLock);
7415    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7416    if (pCB) {
7417        if (pCB->state == CB_RECORDING) {
7418            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7419            uint32_t totalDynamicDescriptors = 0;
7420            string errorString = "";
7421            uint32_t lastSetIndex = firstSet + setCount - 1;
7422            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7423                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7424            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7425            for (uint32_t i = 0; i < setCount; i++) {
7426                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7427                if (pSet) {
7428                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7429                    pSet->boundCmdBuffers.insert(commandBuffer);
7430                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7431                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7432                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7433                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7434                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7435                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7436                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7437                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7438                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7439                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7440                                            "DS %#" PRIxLEAST64
7441                                            " bound but it was never updated. You may want to either update it or not bind it.",
7442                                            (uint64_t)pDescriptorSets[i]);
7443                    }
7444                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7445                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7446                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7447                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7448                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7449                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
7450                                            "pipelineLayout due to: %s",
7451                                            i + firstSet, errorString.c_str());
7452                    }
7453                    if (pSet->pLayout->dynamicDescriptorCount) {
7454                        // First make sure we won't overstep bounds of pDynamicOffsets array
7455                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7456                            skipCall |=
7457                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7458                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7459                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7460                                        "descriptorSet #%u (%#" PRIxLEAST64
7461                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7462                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7463                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7464                                        (dynamicOffsetCount - totalDynamicDescriptors));
7465                        } else { // Validate and store dynamic offsets with the set
7466                            // Validate Dynamic Offset Minimums
7467                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7468                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7469                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7470                                    if (vk_safe_modulo(
7471                                            pDynamicOffsets[cur_dyn_offset],
7472                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7473                                        0) {
7474                                        skipCall |= log_msg(
7475                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7476                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7477                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7478                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7479                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7480                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7481                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7482                                    }
7483                                    cur_dyn_offset++;
7484                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7485                                    if (vk_safe_modulo(
7486                                            pDynamicOffsets[cur_dyn_offset],
7487                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7488                                        0) {
7489                                        skipCall |= log_msg(
7490                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7491                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7492                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7493                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7494                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7495                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7496                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7497                                    }
7498                                    cur_dyn_offset++;
7499                                }
7500                            }
7501                            // Keep running total of dynamic descriptor count to verify at the end
7502                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7503                        }
7504                    }
7505                } else {
7506                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7507                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7508                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7509                                        (uint64_t)pDescriptorSets[i]);
7510                }
7511                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7512                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7513                if (firstSet > 0) { // Check set #s below the first bound set
7514                    for (uint32_t i = 0; i < firstSet; ++i) {
7515                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7516                            !verify_set_layout_compatibility(
7517                                dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7518                                errorString)) {
7519                            skipCall |= log_msg(
7520                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7521                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7522                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7523                                "DescriptorSet DS %#" PRIxLEAST64
7524                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7525                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7526                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7527                        }
7528                    }
7529                }
7530                // Check if newly last bound set invalidates any remaining bound sets
7531                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7532                    if (oldFinalBoundSet &&
7533                        !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7534                                                         errorString)) {
7535                        skipCall |=
7536                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7537                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7538                                    DRAWSTATE_NONE, "DS", "DescriptorSet DS %#" PRIxLEAST64
7539                                                          " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7540                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7541                                                          "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7542                                    (uint64_t)oldFinalBoundSet, lastSetIndex,
7543                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7544                                    lastSetIndex + 1, (uint64_t)layout);
7545                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7546                    }
7547                }
7548            }
7549            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7550            if (totalDynamicDescriptors != dynamicOffsetCount) {
7551                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7552                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7553                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7554                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7555                                    "is %u. It should exactly match the number of dynamic descriptors.",
7556                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7557            }
7558            // Save dynamicOffsets bound to this CB
7559            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7560                pCB->lastBound[pipelineBindPoint].dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7561            }
7562        } else {
7563            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7564        }
7565    }
7566    loader_platform_thread_unlock_mutex(&globalLock);
7567    if (VK_FALSE == skipCall)
7568        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7569                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7570}
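
// Illustrative sketch (assumed handles): dynamicOffsetCount must equal the
// total number of dynamic descriptors across all sets being bound, and each
// offset must be a multiple of the matching min*BufferOffsetAlignment limit.
//
//     // One set containing two UNIFORM_BUFFER_DYNAMIC descriptors:
//     uint32_t dynamicOffsets[2] = {0, 256}; // assumes 256 satisfies minUniformBufferOffsetAlignment
//     vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineLayout,
//                             0 /*firstSet*/, 1 /*setCount*/, &descriptorSet,
//                             2 /*dynamicOffsetCount*/, dynamicOffsets);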
7571
7572VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7573vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7574    VkBool32 skipCall = VK_FALSE;
7575    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7576    loader_platform_thread_lock_mutex(&globalLock);
7577#if MTMERGESOURCE
7578    VkDeviceMemory mem;
7579    skipCall =
7580        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7581    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7582    if (cb_data != dev_data->commandBufferMap.end()) {
7583        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7584        cb_data->second->validate_functions.push_back(function);
7585    }
7586    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7587#endif
7588    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7589    if (pCB) {
7590        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7591        VkDeviceSize offset_align = 0;
7592        switch (indexType) {
7593        case VK_INDEX_TYPE_UINT16:
7594            offset_align = 2;
7595            break;
7596        case VK_INDEX_TYPE_UINT32:
7597            offset_align = 4;
7598            break;
7599        default:
7600            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7601            break;
7602        }
7603        if (!offset_align || (offset % offset_align)) {
7604            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7605                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7606                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") is not aligned for index type %s.",
7607                                offset, string_VkIndexType(indexType));
7608        }
7609        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7610    }
7611    loader_platform_thread_unlock_mutex(&globalLock);
7612    if (VK_FALSE == skipCall)
7613        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7614}
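
// Illustrative note: the offset handed to vkCmdBindIndexBuffer() must be a
// multiple of the index size (2 bytes for UINT16, 4 bytes for UINT32).
//
//     vkCmdBindIndexBuffer(cmd, indexBuffer, 6, VK_INDEX_TYPE_UINT16); // OK: 6 % 2 == 0
//     vkCmdBindIndexBuffer(cmd, indexBuffer, 6, VK_INDEX_TYPE_UINT32); // flagged: 6 % 4 != 0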
7615
7616void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7617    uint32_t end = firstBinding + bindingCount;
7618    if (pCB->currentDrawData.buffers.size() < end) {
7619        pCB->currentDrawData.buffers.resize(end);
7620    }
7621    for (uint32_t i = 0; i < bindingCount; ++i) {
7622        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7623    }
7624}
7625
7626void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7627
7628VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7629                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7630                                                                  const VkDeviceSize *pOffsets) {
7631    VkBool32 skipCall = VK_FALSE;
7632    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7633    loader_platform_thread_lock_mutex(&globalLock);
7634#if MTMERGESOURCE
7635    for (uint32_t i = 0; i < bindingCount; ++i) {
7636        VkDeviceMemory mem;
7637        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7638                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7639        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7640        if (cb_data != dev_data->commandBufferMap.end()) {
7641            std::function<VkBool32()> function =
7642                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7643            cb_data->second->validate_functions.push_back(function);
7644        }
7645    }
7646    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7647#endif
7648    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7649    if (pCB) {
7650        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7651        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7652    } else {
7653        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7654    }
7655    loader_platform_thread_unlock_mutex(&globalLock);
7656    if (VK_FALSE == skipCall)
7657        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7658}
7659
7660/* expects globalLock to be held by caller */
7661bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7662    bool skip_call = false;
7663
7664    for (auto imageView : pCB->updateImages) {
7665        auto iv_data = dev_data->imageViewMap.find(imageView);
7666        if (iv_data == dev_data->imageViewMap.end())
7667            continue;
7668        VkImage image = iv_data->second.image;
7669        VkDeviceMemory mem;
7670        skip_call |=
7671            get_mem_binding_from_object(dev_data, pCB->commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7672        std::function<VkBool32()> function = [=]() {
7673            set_memory_valid(dev_data, mem, true, image);
7674            return VK_FALSE;
7675        };
7676        pCB->validate_functions.push_back(function);
7677    }
7678    for (auto buffer : pCB->updateBuffers) {
7679        VkDeviceMemory mem;
7680        skip_call |= get_mem_binding_from_object(dev_data, pCB->commandBuffer, (uint64_t)buffer,
7681                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7682        std::function<VkBool32()> function = [=]() {
7683            set_memory_valid(dev_data, mem, true);
7684            return VK_FALSE;
7685        };
7686        pCB->validate_functions.push_back(function);
7687    }
7688    return skip_call;
7689}
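
// Note on the pattern above: instead of validating memory immediately, the
// layer queues std::function<VkBool32()> callbacks on pCB->validate_functions;
// they are replayed later, when the command buffer's memory references are
// re-checked (at queue-submit validation), so writes recorded here are only
// treated as making the memory valid once the work is actually submitted.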
7690
7691VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7692                                                     uint32_t firstVertex, uint32_t firstInstance) {
7693    VkBool32 skipCall = VK_FALSE;
7694    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7695    loader_platform_thread_lock_mutex(&globalLock);
7696    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7697    if (pCB) {
7698        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7699        pCB->drawCount[DRAW]++;
7700        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7701        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7702        // TODO : Need to pass commandBuffer as srcObj here
7703        skipCall |=
7704            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7705                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7706        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7707        if (VK_FALSE == skipCall) {
7708            updateResourceTrackingOnDraw(pCB);
7709        }
7710        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7711    }
7712    loader_platform_thread_unlock_mutex(&globalLock);
7713    if (VK_FALSE == skipCall)
7714        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7715}
7716
7717VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7718                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7719                                                            uint32_t firstInstance) {
7720    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7721    VkBool32 skipCall = VK_FALSE;
7722    loader_platform_thread_lock_mutex(&globalLock);
7723    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7724    if (pCB) {
7725        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7726        pCB->drawCount[DRAW_INDEXED]++;
7727        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_TRUE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7728        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7729        // TODO : Need to pass commandBuffer as srcObj here
7730        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7731                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7732                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7733        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7734        if (VK_FALSE == skipCall) {
7735            updateResourceTrackingOnDraw(pCB);
7736        }
7737        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7738    }
7739    loader_platform_thread_unlock_mutex(&globalLock);
7740    if (VK_FALSE == skipCall)
7741        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7742                                                        firstInstance);
7743}
7744
7745VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7746vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7747    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7748    VkBool32 skipCall = VK_FALSE;
7749    loader_platform_thread_lock_mutex(&globalLock);
7750#if MTMERGESOURCE
7751    VkDeviceMemory mem;
7752    // MTMTODO : merge with code below
7753    skipCall =
7754        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7755    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7756#endif
7757    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7758    if (pCB) {
7759        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7760        pCB->drawCount[DRAW_INDIRECT]++;
7761        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7762        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7763        // TODO : Need to pass commandBuffer as srcObj here
7764        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7765                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7766                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7767        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7768        if (VK_FALSE == skipCall) {
7769            updateResourceTrackingOnDraw(pCB);
7770        }
7771        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7772    }
7773    loader_platform_thread_unlock_mutex(&globalLock);
7774    if (VK_FALSE == skipCall)
7775        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7776}
7777
7778VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7779vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7780    VkBool32 skipCall = VK_FALSE;
7781    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7782    loader_platform_thread_lock_mutex(&globalLock);
7783#if MTMERGESOURCE
7784    VkDeviceMemory mem;
7785    // MTMTODO : merge with code below
7786    skipCall =
7787        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7788    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7789#endif
7790    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7791    if (pCB) {
7792        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7793        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7794        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_TRUE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7795        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7796        // TODO : Need to pass commandBuffer as srcObj here
7797        skipCall |=
7798            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7799                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7800                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7801        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7802        if (VK_FALSE == skipCall) {
7803            updateResourceTrackingOnDraw(pCB);
7804        }
7805        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7806    }
7807    loader_platform_thread_unlock_mutex(&globalLock);
7808    if (VK_FALSE == skipCall)
7809        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7810}
7811
7812VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7813    VkBool32 skipCall = VK_FALSE;
7814    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7815    loader_platform_thread_lock_mutex(&globalLock);
7816    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7817    if (pCB) {
7818        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7819        // skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_COMPUTE);
7820        // TODO : Call below is temporary until call above can be re-enabled
7821        update_shader_storage_images_and_buffers(dev_data, pCB);
7822        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7823        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7824        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7825    }
7826    loader_platform_thread_unlock_mutex(&globalLock);
7827    if (VK_FALSE == skipCall)
7828        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7829}
7830
7831VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7832vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7833    VkBool32 skipCall = VK_FALSE;
7834    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7835    loader_platform_thread_lock_mutex(&globalLock);
7836#if MTMERGESOURCE
7837    VkDeviceMemory mem;
7838    skipCall =
7839        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7840    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7841#endif
7842    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7843    if (pCB) {
7844        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7845        // skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_COMPUTE);
7846        // TODO : Call below is temporary until call above can be re-enabled
7847        update_shader_storage_images_and_buffers(dev_data, pCB);
7848        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7849        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7850        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7851    }
7852    loader_platform_thread_unlock_mutex(&globalLock);
7853    if (VK_FALSE == skipCall)
7854        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7855}
7856
7857VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7858                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7859    VkBool32 skipCall = VK_FALSE;
7860    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7861    loader_platform_thread_lock_mutex(&globalLock);
7862#if MTMERGESOURCE
7863    VkDeviceMemory mem;
7864    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7865    skipCall =
7866        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7867    if (cb_data != dev_data->commandBufferMap.end()) {
7868        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7869        cb_data->second->validate_functions.push_back(function);
7870    }
7871    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7872    skipCall |=
7873        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7874    if (cb_data != dev_data->commandBufferMap.end()) {
7875        std::function<VkBool32()> function = [=]() {
7876            set_memory_valid(dev_data, mem, true);
7877            return VK_FALSE;
7878        };
7879        cb_data->second->validate_functions.push_back(function);
7880    }
7881    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7882    // Validate that SRC & DST buffers have correct usage flags set
7883    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7884                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7885    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7886                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7887#endif
7888    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7889    if (pCB) {
7890        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7891        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7892    }
7893    loader_platform_thread_unlock_mutex(&globalLock);
7894    if (VK_FALSE == skipCall)
7895        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7896}
7897
7898VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7899                                 VkImageLayout srcImageLayout) {
7900    VkBool32 skip_call = VK_FALSE;
7901
7902    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7903    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7904    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7905        uint32_t layer = i + subLayers.baseArrayLayer;
7906        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7907        IMAGE_CMD_BUF_LAYOUT_NODE node;
7908        if (!FindLayout(pCB, srcImage, sub, node)) {
7909            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7910            continue;
7911        }
7912        if (node.layout != srcImageLayout) {
7913            // TODO: Improve log message in the next pass
7914            skip_call |=
7915                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7916                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7917                                                                        "but whose current layout is %s; the layouts must match.",
7918                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7919        }
7920    }
7921    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7922        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7923            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7924            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7925                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7926                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7927        } else {
7928            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7929                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7930                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7931                                 string_VkImageLayout(srcImageLayout));
7932        }
7933    }
7934    return skip_call;
7935}
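
// Illustrative sketch (assumed application-side handles): transitioning the
// source image to TRANSFER_SRC_OPTIMAL before the copy satisfies both the
// layout-match check and the GENERAL-layout performance warning above.
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = srcImage;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &barrier);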
7936
7937VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7938                               VkImageLayout destImageLayout) {
7939    VkBool32 skip_call = VK_FALSE;
7940
7941    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7942    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7943    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7944        uint32_t layer = i + subLayers.baseArrayLayer;
7945        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7946        IMAGE_CMD_BUF_LAYOUT_NODE node;
7947        if (!FindLayout(pCB, destImage, sub, node)) {
7948            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7949            continue;
7950        }
7951        if (node.layout != destImageLayout) {
7952            skip_call |=
7953                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7954                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout is %s "
7955                                                                        "but whose current layout is %s; the layouts must match.",
7956                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7957        }
7958    }
7959    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7960        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7961            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7962            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7963                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7964                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7965        } else {
7966            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7967                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7968                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7969                                 string_VkImageLayout(destImageLayout));
7970        }
7971    }
7972    return skip_call;
7973}
7974
7975VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7976vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7977               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7978    VkBool32 skipCall = VK_FALSE;
7979    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7980    loader_platform_thread_lock_mutex(&globalLock);
7981#if MTMERGESOURCE
7982    VkDeviceMemory mem;
7983    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7984    // Validate that src & dst images have correct usage flags set
7985    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7986    if (cb_data != dev_data->commandBufferMap.end()) {
7987        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
7988        cb_data->second->validate_functions.push_back(function);
7989    }
7990    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7991    skipCall |=
7992        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7993    if (cb_data != dev_data->commandBufferMap.end()) {
7994        std::function<VkBool32()> function = [=]() {
7995            set_memory_valid(dev_data, mem, true, dstImage);
7996            return VK_FALSE;
7997        };
7998        cb_data->second->validate_functions.push_back(function);
7999    }
8000    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
8001    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8002                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8003    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8004                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8005#endif
8006    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8007    if (pCB) {
8008        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
8009        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
8010        for (uint32_t i = 0; i < regionCount; ++i) {
8011            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
8012            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
8013        }
8014    }
8015    loader_platform_thread_unlock_mutex(&globalLock);
8016    if (VK_FALSE == skipCall)
8017        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8018                                                      regionCount, pRegions);
8019}
8020
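// Note: unlike vkCmdCopyImage above, this entry point validates memory bindings and usage flags but does
// not verify per-region source/destination image layouts.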
8021VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8022vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8023               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
8024    VkBool32 skipCall = VK_FALSE;
8025    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8026    loader_platform_thread_lock_mutex(&globalLock);
8027#if MTMERGESOURCE
8028    VkDeviceMemory mem;
8029    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8030    // Validate that src & dst images have correct usage flags set
8031    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8032    if (cb_data != dev_data->commandBufferMap.end()) {
8033        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
8034        cb_data->second->validate_functions.push_back(function);
8035    }
8036    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
8037    skipCall |=
8038        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8039    if (cb_data != dev_data->commandBufferMap.end()) {
8040        std::function<VkBool32()> function = [=]() {
8041            set_memory_valid(dev_data, mem, true, dstImage);
8042            return VK_FALSE;
8043        };
8044        cb_data->second->validate_functions.push_back(function);
8045    }
8046    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
8047    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8048                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8049    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8050                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8051#endif
8052    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8053    if (pCB) {
8054        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
8055        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
8056    }
8057    loader_platform_thread_unlock_mutex(&globalLock);
8058    if (VK_FALSE == skipCall)
8059        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8060                                                      regionCount, pRegions, filter);
8061}
8062
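// Buffer-to-image copy: the source buffer must carry TRANSFER_SRC usage, the destination image must carry
// TRANSFER_DST usage, and each region's destination subresource layout is verified below.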
8063VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8064                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
8065                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8066    VkBool32 skipCall = VK_FALSE;
8067    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8068    loader_platform_thread_lock_mutex(&globalLock);
8069#if MTMERGESOURCE
8070    VkDeviceMemory mem;
8071    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8072    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8073    if (cb_data != dev_data->commandBufferMap.end()) {
8074        std::function<VkBool32()> function = [=]() {
8075            set_memory_valid(dev_data, mem, true, dstImage);
8076            return VK_FALSE;
8077        };
8078        cb_data->second->validate_functions.push_back(function);
8079    }
8080    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8081    skipCall |=
8082        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8083    if (cb_data != dev_data->commandBufferMap.end()) {
8084        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
8085        cb_data->second->validate_functions.push_back(function);
8086    }
8087    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
8088    // Validate that src buff & dst image have correct usage flags set
8089    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8090                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8091    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8092                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8093#endif
8094    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8095    if (pCB) {
8096        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8097        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
8098        for (uint32_t i = 0; i < regionCount; ++i) {
8099            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8100        }
8101    }
8102    loader_platform_thread_unlock_mutex(&globalLock);
8103    if (VK_FALSE == skipCall)
8104        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
8105                                                              pRegions);
8106}
8107
8108VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8109                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8110                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8111    VkBool32 skipCall = VK_FALSE;
8112    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8113    loader_platform_thread_lock_mutex(&globalLock);
8114#if MTMERGESOURCE
8115    VkDeviceMemory mem;
8116    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8117    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8118    if (cb_data != dev_data->commandBufferMap.end()) {
8119        std::function<VkBool32()> function =
8120            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
8121        cb_data->second->validate_functions.push_back(function);
8122    }
8123    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8124    skipCall |=
8125        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8126    if (cb_data != dev_data->commandBufferMap.end()) {
8127        std::function<VkBool32()> function = [=]() {
8128            set_memory_valid(dev_data, mem, true);
8129            return VK_FALSE;
8130        };
8131        cb_data->second->validate_functions.push_back(function);
8132    }
8133    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
8134    // Validate that dst buff & src image have correct usage flags set
8135    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8136                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8137    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8138                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8139#endif
8140    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8141    if (pCB) {
8142        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8143        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
8144        for (uint32_t i = 0; i < regionCount; ++i) {
8145            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8146        }
8147    }
8148    loader_platform_thread_unlock_mutex(&globalLock);
8149    if (VK_FALSE == skipCall)
8150        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
8151                                                              pRegions);
8152}
8153
8154VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8155                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8156    VkBool32 skipCall = VK_FALSE;
8157    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8158    loader_platform_thread_lock_mutex(&globalLock);
8159#if MTMERGESOURCE
8160    VkDeviceMemory mem;
8161    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8162    skipCall =
8163        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8164    if (cb_data != dev_data->commandBufferMap.end()) {
8165        std::function<VkBool32()> function = [=]() {
8166            set_memory_valid(dev_data, mem, true);
8167            return VK_FALSE;
8168        };
8169        cb_data->second->validate_functions.push_back(function);
8170    }
8171    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
8172    // Validate that dst buff has correct usage flags set
8173    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8174                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8175#endif
8176    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8177    if (pCB) {
8178        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8179        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
8180    }
8181    loader_platform_thread_unlock_mutex(&globalLock);
8182    if (VK_FALSE == skipCall)
8183        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8184}
8185
8186VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8187vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8188    VkBool32 skipCall = VK_FALSE;
8189    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8190    loader_platform_thread_lock_mutex(&globalLock);
8191#if MTMERGESOURCE
8192    VkDeviceMemory mem;
8193    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8194    skipCall =
8195        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8196    if (cb_data != dev_data->commandBufferMap.end()) {
8197        std::function<VkBool32()> function = [=]() {
8198            set_memory_valid(dev_data, mem, true);
8199            return VK_FALSE;
8200        };
8201        cb_data->second->validate_functions.push_back(function);
8202    }
8203    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
8204    // Validate that dst buff has correct usage flags set
8205    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8206                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8207#endif
8208    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8209    if (pCB) {
8210        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8211        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
8212    }
8213    loader_platform_thread_unlock_mutex(&globalLock);
8214    if (VK_FALSE == skipCall)
8215        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8216}
8217
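// In addition to the perf warning below, each cleared attachment must be referenced by the currently
// active subpass (color attachments in pColorAttachments, depth/stencil in pDepthStencilAttachment).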
8218VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8219                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
8220                                                                 const VkClearRect *pRects) {
8221    VkBool32 skipCall = VK_FALSE;
8222    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8223    loader_platform_thread_lock_mutex(&globalLock);
8224    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8225    if (pCB) {
8226        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8227        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8228        if (!hasDrawCmd(pCB) && (rectCount > 0) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8229            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8230            // TODO : commandBuffer should be srcObj
8231            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
8232            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
8233            // call CmdClearAttachments
8234            // Otherwise this seems more like a performance warning.
8235            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8236                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8237                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8238                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8239                                (uint64_t)(commandBuffer));
8240        }
8241        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
8242    }
8243
8244    // Validate that attachment is in reference list of active subpass
8245    if (pCB && pCB->activeRenderPass) {
8246        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
8247        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8248
8249        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8250            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8251            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8252                VkBool32 found = VK_FALSE;
8253                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
8254                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
8255                        found = VK_TRUE;
8256                        break;
8257                    }
8258                }
8259                if (VK_FALSE == found) {
8260                    skipCall |= log_msg(
8261                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8262                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8263                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
8264                        attachment->colorAttachment, pCB->activeSubpass);
8265                }
8266            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8267                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8268                    (pSD->pDepthStencilAttachment->attachment ==
8269                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8270
8271                    skipCall |= log_msg(
8272                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8273                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8274                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
8275                        "in active subpass %d",
8276                        attachment->colorAttachment,
8277                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
8278                        pCB->activeSubpass);
8279                }
8280            }
8281        }
8282    }
8283    loader_platform_thread_unlock_mutex(&globalLock);
8284    if (VK_FALSE == skipCall)
8285        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8286}
8287
8288VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8289                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
8290                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8291    VkBool32 skipCall = VK_FALSE;
8292    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8293    loader_platform_thread_lock_mutex(&globalLock);
8294#if MTMERGESOURCE
8295    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8296    VkDeviceMemory mem;
8297    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8298    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8299    if (cb_data != dev_data->commandBufferMap.end()) {
8300        std::function<VkBool32()> function = [=]() {
8301            set_memory_valid(dev_data, mem, true, image);
8302            return VK_FALSE;
8303        };
8304        cb_data->second->validate_functions.push_back(function);
8305    }
8306    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
8307#endif
8308    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8309    if (pCB) {
8310        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8311        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
8312    }
8313    loader_platform_thread_unlock_mutex(&globalLock);
8314    if (VK_FALSE == skipCall)
8315        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8316}
8317
8318VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8319vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8320                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8321                            const VkImageSubresourceRange *pRanges) {
8322    VkBool32 skipCall = VK_FALSE;
8323    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8324    loader_platform_thread_lock_mutex(&globalLock);
8325#if MTMERGESOURCE
8326    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8327    VkDeviceMemory mem;
8328    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8329    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8330    if (cb_data != dev_data->commandBufferMap.end()) {
8331        std::function<VkBool32()> function = [=]() {
8332            set_memory_valid(dev_data, mem, true, image);
8333            return VK_FALSE;
8334        };
8335        cb_data->second->validate_functions.push_back(function);
8336    }
8337    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
8338#endif
8339    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8340    if (pCB) {
8341        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8342        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
8343    }
8344    loader_platform_thread_unlock_mutex(&globalLock);
8345    if (VK_FALSE == skipCall)
8346        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
8347                                                                   pRanges);
8348}
8349
8350VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8351vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8352                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8353    VkBool32 skipCall = VK_FALSE;
8354    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8355    loader_platform_thread_lock_mutex(&globalLock);
8356#if MTMERGESOURCE
8357    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8358    VkDeviceMemory mem;
8359    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8360    if (cb_data != dev_data->commandBufferMap.end()) {
8361        std::function<VkBool32()> function =
8362            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
8363        cb_data->second->validate_functions.push_back(function);
8364    }
8365    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8366    skipCall |=
8367        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8368    if (cb_data != dev_data->commandBufferMap.end()) {
8369        std::function<VkBool32()> function = [=]() {
8370            set_memory_valid(dev_data, mem, true, dstImage);
8371            return VK_FALSE;
8372        };
8373        cb_data->second->validate_functions.push_back(function);
8374    }
8375    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8376#endif
8377    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8378    if (pCB) {
8379        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8380        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
8381    }
8382    loader_platform_thread_unlock_mutex(&globalLock);
8383    if (VK_FALSE == skipCall)
8384        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8385                                                         regionCount, pRegions);
8386}
8387
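// Queue-time callback: record the stage mask most recently set for 'event' on both the command buffer
// and the queue, so validateEventStageMask below can reconstruct the expected srcStageMask at submit time.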
8388bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8389    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8390    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8391    if (pCB) {
8392        pCB->eventToStageMap[event] = stageMask;
8393    }
8394    auto queue_data = dev_data->queueMap.find(queue);
8395    if (queue_data != dev_data->queueMap.end()) {
8396        queue_data->second.eventToStageMap[event] = stageMask;
8397    }
8398    return false;
8399}
8400
8401VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8402vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8403    VkBool32 skipCall = VK_FALSE;
8404    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8405    loader_platform_thread_lock_mutex(&globalLock);
8406    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8407    if (pCB) {
8408        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8409        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
8410        pCB->events.push_back(event);
8411        std::function<bool(VkQueue)> eventUpdate =
8412            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8413        pCB->eventUpdates.push_back(eventUpdate);
8414    }
8415    loader_platform_thread_unlock_mutex(&globalLock);
8416    if (VK_FALSE == skipCall)
8417        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
8418}
8419
8420VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8421vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8422    VkBool32 skipCall = VK_FALSE;
8423    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8424    loader_platform_thread_lock_mutex(&globalLock);
8425    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8426    if (pCB) {
8427        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8428        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8429        pCB->events.push_back(event);
8430        std::function<bool(VkQueue)> eventUpdate =
8431            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8432        pCB->eventUpdates.push_back(eventUpdate);
8433    }
8434    loader_platform_thread_unlock_mutex(&globalLock);
8435    if (VK_FALSE == skipCall)
8436        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8437}
8438
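// Apply the old/new layouts from a set of image memory barriers to this command buffer's per-subresource
// layout tracking, flagging any barrier whose oldLayout disagrees with the layout recorded so far.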
8439VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
8440    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8441    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8442    VkBool32 skip = VK_FALSE;
8443    uint32_t levelCount = 0;
8444    uint32_t layerCount = 0;
8445
8446    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8447        auto mem_barrier = &pImgMemBarriers[i];
8448        if (!mem_barrier)
8449            continue;
8450        // TODO: Do not iterate over every possibility - consolidate where
8451        // possible
8452        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8453
8454        for (uint32_t j = 0; j < levelCount; j++) {
8455            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8456            for (uint32_t k = 0; k < layerCount; k++) {
8457                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8458                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8459                IMAGE_CMD_BUF_LAYOUT_NODE node;
8460                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8461                    SetLayout(pCB, mem_barrier->image, sub,
8462                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8463                    continue;
8464                }
8465                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8466                    // TODO: Set memory invalid which is in mem_tracker currently
8467                } else if (node.layout != mem_barrier->oldLayout) {
8468                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8469                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8470                                                                                    "when current layout is %s.",
8471                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8472                }
8473                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8474            }
8475        }
8476    }
8477    return skip;
8478}
8479
8480// Print readable FlagBits in FlagMask
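// e.g. string_VkAccessFlags(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT) yields
//   "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]"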
8481std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8482    std::string result;
8483    std::string separator;
8484
8485    if (accessMask == 0) {
8486        result = "[None]";
8487    } else {
8488        result = "[";
8489        for (auto i = 0; i < 32; i++) {
8490            if (accessMask & (1u << i)) {
8491                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8492                separator = " | ";
8493            }
8494        }
8495        result = result + "]";
8496    }
8497    return result;
8498}
8499
8500// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8501// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8502// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8503VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8504                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
8505    VkBool32 skip_call = VK_FALSE;
8506
8507    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8508        if (accessMask & ~(required_bit | optional_bits)) { // bitwise complement: detect bits outside required|optional
8509            // TODO: Verify against Valid Use
8510            skip_call |=
8511                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8512                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8513                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8514        }
8515    } else {
8516        if (!required_bit) {
8517            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8518                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8519                                                                  "%s when layout is %s, unless the app has previously added a "
8520                                                                  "barrier for this transition.",
8521                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8522                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8523        } else {
8524            std::string opt_bits;
8525            if (optional_bits != 0) {
8526                std::stringstream ss;
8527                ss << optional_bits;
8528                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8529            }
8530            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8531                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8532                                                                  "layout is %s, unless the app has previously added a barrier for "
8533                                                                  "this transition.",
8534                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8535                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8536        }
8537    }
8538    return skip_call;
8539}
8540
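// Map each image layout to the access bits a barrier to/from it must (or may) carry and defer to
// ValidateMaskBits, e.g. a transition to TRANSFER_DST_OPTIMAL requires VK_ACCESS_TRANSFER_WRITE_BIT.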
8541VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8542                                     const VkImageLayout &layout, const char *type) {
8543    VkBool32 skip_call = VK_FALSE;
8544    switch (layout) {
8545    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8546        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8547                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8548        break;
8549    }
8550    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8551        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8552                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8553        break;
8554    }
8555    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8556        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8557        break;
8558    }
8559    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8560        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8561        break;
8562    }
8563    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8564        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8565                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8566        break;
8567    }
8568    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8569        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8570                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8571        break;
8572    }
8573    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8574        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8575        break;
8576    }
8577    case VK_IMAGE_LAYOUT_UNDEFINED: {
8578        if (accessMask != 0) {
8579            // TODO: Verify against Valid Use section spec
8580            skip_call |=
8581                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8582                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8583                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8584        }
8585        break;
8586    }
8587    case VK_IMAGE_LAYOUT_GENERAL:
8588    default: { break; }
8589    }
8590    return skip_call;
8591}
8592
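// Validate a batch of memory/buffer/image barriers: barriers inside a render pass require a subpass
// self-dependency; queue family indices must respect the resource's sharing mode; access masks must be
// consistent with the old/new layouts; and subresource/offset ranges must fit the resource.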
8593VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8594                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8595                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8596                          const VkImageMemoryBarrier *pImageMemBarriers) {
8597    VkBool32 skip_call = VK_FALSE;
8598    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8599    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8600    if (pCB->activeRenderPass && memBarrierCount) {
8601        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
8602            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8603                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8604                                                                  "with no self dependency specified.",
8605                                 funcName, pCB->activeSubpass);
8606        }
8607    }
8608    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8609        auto mem_barrier = &pImageMemBarriers[i];
8610        auto image_data = dev_data->imageMap.find(mem_barrier->image);
8611        if (image_data != dev_data->imageMap.end()) {
8612            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8613            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8614            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8615                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8616                // be VK_QUEUE_FAMILY_IGNORED
8617                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8618                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8619                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8620                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8621                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
8622                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8623                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8624                }
8625            } else {
8626                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8627                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8628                // or both be a valid queue family
8629                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8630                    (src_q_f_index != dst_q_f_index)) {
8631                    skip_call |=
8632                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8633                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8634                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8635                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8636                                                                     "must be.",
8637                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8638                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8639                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
8640                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
8641                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8642                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8643                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8644                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8645                                         " or dstQueueFamilyIndex %d is not less than the number of queueFamilies (" PRINTF_SIZE_T_SPECIFIER
8646                                         ") created for this device.",
8647                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8648                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
8649                }
8650            }
8651        }
8652
8653        if (mem_barrier) {
8654            skip_call |=
8655                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8656            skip_call |=
8657                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8658            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8659                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8660                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8661                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
8662                                     funcName);
8663            }
8664            auto image_data = dev_data->imageMap.find(mem_barrier->image);
8665            VkFormat format = VK_FORMAT_UNDEFINED;
8666            uint32_t arrayLayers = 0, mipLevels = 0;
8667            bool imageFound = false;
8668            if (image_data != dev_data->imageMap.end()) {
8669                format = image_data->second.createInfo.format;
8670                arrayLayers = image_data->second.createInfo.arrayLayers;
8671                mipLevels = image_data->second.createInfo.mipLevels;
8672                imageFound = true;
8673            } else if (dev_data->device_extensions.wsi_enabled) {
8674                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
8675                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
8676                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
8677                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
8678                        format = swapchain_data->second->createInfo.imageFormat;
8679                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
8680                        mipLevels = 1;
8681                        imageFound = true;
8682                    }
8683                }
8684            }
8685            if (imageFound) {
8686                if (vk_format_is_depth_and_stencil(format) &&
8687                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
8688                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
8689                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8690                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8691                                         "%s: Image is a depth and stencil format and thus must "
8692                                         "have both VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
8693                                         funcName);
8694                }
8695                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8696                                     ? 1
8697                                     : mem_barrier->subresourceRange.layerCount;
8698                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8699                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8700                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8701                                         "%s: Subresource: the sum of baseArrayLayer (%d) and layerCount (%d) must be less "
8702                                         "than or equal to the total number of layers (%d).",
8703                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
8704                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
8705                }
8706                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8707                                     ? 1
8708                                     : mem_barrier->subresourceRange.levelCount;
8709                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8710                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8711                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8712                                         "%s: Subresource: the sum of baseMipLevel (%d) and levelCount (%d) must be less "
8713                                         "than or equal to the total number of levels (%d).",
8714                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
8715                                         mem_barrier->subresourceRange.levelCount, mipLevels);
8716                }
8717            }
8718        }
8719    }
8720    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8721        auto mem_barrier = &pBufferMemBarriers[i];
8722        if (pCB->activeRenderPass) {
8723            skip_call |=
8724                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8725                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8726        }
8727        if (!mem_barrier)
8728            continue;
8729
8730        // Validate buffer barrier queue family indices
8731        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8732             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
8733            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8734             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
8735            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8736                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8737                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8738                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8739                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8740                                 dev_data->physDevProperties.queue_family_properties.size());
8741        }
8742
8743        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
8744        if (buffer_data != dev_data->bufferMap.end()) {
8745            // Only dereference the iterator once we know the buffer is tracked
8746            uint64_t buffer_size = buffer_data->second.create_info ? buffer_data->second.create_info->size : 0;
8747            if (mem_barrier->offset >= buffer_size) {
8748                skip_call |=
8749                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8750                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
8751                                                             " which is not less than total size %" PRIu64 ".",
8752                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8753                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
8754            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8755                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8756                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8757                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
8758                                     " whose sum is greater than total size %" PRIu64 ".",
8759                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8760                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8761                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
8762            }
8763        }
8764    }
8765    return skip_call;
8766}
8767
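// Submit-time check: OR together the stage masks recorded for the events this vkCmdWaitEvents call waits
// on (falling back to the device-level event state, and erroring on events never set) and require that
// the result equals the srcStageMask the app passed in.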
8768bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
8769    bool skip_call = false;
8770    VkPipelineStageFlags stageMask = 0;
8771    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8772    for (uint32_t i = 0; i < eventCount; ++i) {
8773        auto event = pCB->events[firstEventIndex + i];
8774        auto queue_data = dev_data->queueMap.find(queue);
8775        if (queue_data == dev_data->queueMap.end())
8776            return false;
8777        auto event_data = queue_data->second.eventToStageMap.find(event);
8778        if (event_data != queue_data->second.eventToStageMap.end()) {
8779            stageMask |= event_data->second;
8780        } else {
8781            auto global_event_data = dev_data->eventMap.find(event);
8782            if (global_event_data == dev_data->eventMap.end()) {
8783                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8784                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8785                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8786                                     reinterpret_cast<const uint64_t &>(event));
8787            } else {
8788                stageMask |= global_event_data->second.stageMask;
8789            }
8790        }
8791    }
8792    if (sourceStageMask != stageMask) {
8793        skip_call |=
8794            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8795                    DRAWSTATE_INVALID_EVENT, "DS",
8796                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
8797                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
8798                    sourceStageMask);
8799    }
8800    return skip_call;
8801}
8802
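// Illustrative app-side sketch of the contract enforced by validateEventStageMask above (assumes
// cb/eventA/eventB are valid handles; not code from this layer):
//
//     vkCmdSetEvent(cb, eventA, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdSetEvent(cb, eventB, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
//     VkEvent events[] = {eventA, eventB};
//     // srcStageMask must equal the OR of the stage masks used in the vkCmdSetEvent calls:
//     vkCmdWaitEvents(cb, 2, events,
//                     VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, NULL, 0, NULL, 0, NULL);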
8803VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8804vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8805                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8806                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8807                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8808    VkBool32 skipCall = VK_FALSE;
8809    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8810    loader_platform_thread_lock_mutex(&globalLock);
8811    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8812    if (pCB) {
8813        auto firstEventIndex = pCB->events.size();
8814        for (uint32_t i = 0; i < eventCount; ++i) {
8815            pCB->waitedEvents.push_back(pEvents[i]);
8816            pCB->events.push_back(pEvents[i]);
8817        }
8818        std::function<bool(VkQueue)> eventUpdate =
8819            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
8820        pCB->eventUpdates.push_back(eventUpdate);
8821        if (pCB->state == CB_RECORDING) {
8822            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8823        } else {
8824            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8825        }
8826        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8827        skipCall |=
8828            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8829                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8830    }
8831    loader_platform_thread_unlock_mutex(&globalLock);
8832    if (VK_FALSE == skipCall)
8833        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8834                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8835                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8836}
8837
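// Illustrative app-side barrier sketch that satisfies the layout/access-mask checks in
// ValidateMaskBitsFromLayouts (assumes cb/image are valid handles; not code from this layer):
//
//     VkImageMemoryBarrier imb = {};
//     imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     imb.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // required bit for TRANSFER_DST_OPTIMAL
//     imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;     // optional bit for SHADER_READ_ONLY_OPTIMAL
//     imb.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     imb.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//     imb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     imb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     imb.image = image;
//     imb.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0,
//                          0, NULL, 0, NULL, 1, &imb);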
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

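// Example (illustrative sketch only): an image layout transition that the
// wrapper above both records (TransitionImageLayouts) and checks
// (ValidateBarriers). `cmdBuf` and `image` are assumed to exist already.
//
//     VkImageMemoryBarrier imb = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     imb.srcAccessMask = 0;
//     imb.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//     imb.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     imb.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     imb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     imb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     imb.image = image;
//     imb.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmdBuf, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
//                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &imb);
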
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

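// Example (illustrative sketch only): the begin/end pairing that the
// activeQueries/startedQueries bookkeeping above is tracking. `cmdBuf` and
// `queryPool` are assumed to have been created with queryCount >= 1.
//
//     vkCmdBeginQuery(cmdBuf, queryPool, 0, 0);
//     // ... draw calls being measured ...
//     vkCmdEndQuery(cmdBuf, queryPool, 0); // removes the query from activeQueries
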
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}

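// Example (illustrative sketch only): the misuse the check above reports.
// Slot 1 was never begun, so vkCmdEndQuery on it is flagged as
// DRAWSTATE_INVALID_QUERY rather than silently passed to the driver.
//
//     vkCmdBeginQuery(cmdBuf, queryPool, 0, 0);
//     vkCmdEndQuery(cmdBuf, queryPool, 1); // error: slot 1 is not in activeQueries
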
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            pCB->queryToStateMap[query] = 0;
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
    // Validate that DST buffer has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            if (!pCB->queryToStateMap[query]) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
                                    (uint64_t)(queryPool), firstQuery + i);
            }
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
                                                                 dstOffset, stride, flags);
}

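// Example (illustrative sketch only): creating a results buffer that satisfies
// the usage check above. Without VK_BUFFER_USAGE_TRANSFER_DST_BIT, the
// validate_buffer_usage_flags call reports an error. `device` and `queryCount`
// are assumed.
//
//     VkBufferCreateInfo bufCI = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
//     bufCI.size = sizeof(uint64_t) * queryCount;
//     bufCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//     bufCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     VkBuffer resultsBuf;
//     vkCreateBuffer(device, &bufCI, nullptr, &resultsBuf);
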
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                                              const void *pValues) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
    }
    // TODO : Add warning if push constant update doesn't align with range
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

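// Example (illustrative sketch only): an update that trips the size check
// above. With maxPushConstantsSize == 128, offset 120 + size 16 exceeds the
// limit, so validatePushConstantSize reports it. `cmdBuf` and `layout` are
// assumed.
//
//     float data[4] = {0.f, 1.f, 2.f, 3.f};
//     vkCmdPushConstants(cmdBuf, layout, VK_SHADER_STAGE_VERTEX_BIT,
//                        120 /*offset*/, sizeof(data) /*size*/, data);
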
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                                   const VkAllocationCallbacks *pAllocator,
                                                                   VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
    if (VK_SUCCESS == result) {
        // Shadow create info and store in map
        loader_platform_thread_lock_mutex(&globalLock);

        auto &fbNode = dev_data->frameBufferMap[*pFramebuffer];
        fbNode.createInfo = *pCreateInfo;
        if (pCreateInfo->pAttachments) {
            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
            memcpy(attachments, pCreateInfo->pAttachments, pCreateInfo->attachmentCount * sizeof(VkImageView));
            fbNode.createInfo.pAttachments = attachments;
        }
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkImageView view = pCreateInfo->pAttachments[i];
            auto view_data = dev_data->imageViewMap.find(view);
            if (view_data == dev_data->imageViewMap.end()) {
                continue;
            }
            MT_FB_ATTACHMENT_INFO fb_info;
            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                        &fb_info.mem);
            fb_info.image = view_data->second.image;
            fbNode.attachments.push_back(fb_info);
        }

        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                        std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index))
        return VK_FALSE;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a direct dependency path. If one exists, return true; otherwise recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return VK_TRUE;
        }
    } else {
        return VK_TRUE;
    }
    return VK_FALSE;
}

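// Example (illustrative): with subpass edges 0 -> 1 -> 2, calling
// FindDependency(2, 0, ...) walks node 2's prev list, recurses through 1, and
// returns VK_TRUE because 0 is transitively reachable; the processed_nodes set
// keeps the walk linear even when subpasses share predecessors.
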
VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
    VkBool32 result = VK_TRUE;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (subpass == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists, we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no explicit dependency exists, an implicit one still might. If so, warn; if not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
                // TODO: Verify against Valid Use section of spec
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
                                     subpass, dependent_subpasses[k]);
            } else {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = VK_FALSE;
            }
        }
    }
    return result;
}

VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as subsequent nodes need to preserve it.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return VK_TRUE;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return VK_TRUE;
    }
    VkBool32 result = VK_FALSE;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        VkBool32 has_preserved = VK_FALSE;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = VK_TRUE;
                break;
            }
        }
        if (has_preserved == VK_FALSE) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}

// Returns true if the half-open ranges [offset1, offset1 + size1) and
// [offset2, offset2 + size2) intersect, including the containment and
// exact-match cases.
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
}

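// Example (illustrative): ranges [2, 2+5) and [4, 4+8) overlap because
// 2 + 5 > 4 and 2 < 4 + 8; ranges [0, 4) and [4, 8) do not, since the test
// treats `offset + size` as an exclusive upper bound (0 + 4 > 4 is false).
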
bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}

VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
                              const std::vector<DAGNode> &subpass_to_node) {
    VkBool32 skip_call = VK_FALSE;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = my_data->imageViewMap.find(viewi);
            auto view_data_j = my_data->imageViewMap.find(viewj);
            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
                continue;
            }
            if (view_data_i->second.image == view_data_j->second.image &&
                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
                continue;
            }
            if (image_data_i->second.mem == image_data_j->second.mem &&
                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
                                   image_data_j->second.memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
                            attachment, i);
            }
        }
    }
    // Where a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input, then all subpasses that output to it must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output, then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved by every pass between
    // the one that wrote it and this one.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}

VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    VkBool32 skip = VK_FALSE;

    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
                }
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
                }
            }
        }
        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
                } else {
                    skip |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
                }
            }
        }
    }
    return skip;
}

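// Example (illustrative sketch only): attachment references that pass the
// layout checks above without warnings.
//
//     VkAttachmentReference colorRef = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkAttachmentReference depthRef = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
//     VkSubpassDescription sp = {};
//     sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
//     sp.colorAttachmentCount = 1;
//     sp.pColorAttachments = &colorRef;
//     sp.pDepthStencilAttachment = &depthRef;
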
VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    VkBool32 skip_call = VK_FALSE;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dst subpasses cannot both be external.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        }
        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
        }
        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}

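// Example (illustrative sketch only): a forward dependency that CreatePassDAG
// accepts, adding subpass 0 to node 1's prev list and 1 to node 0's next list.
// Swapping srcSubpass/dstSubpass (1 -> 0) would trigger the "earlier pass
// cannot depend on a later pass" error above.
//
//     VkSubpassDependency dep = {};
//     dep.srcSubpass = 0;
//     dep.dstSubpass = 1;
//     dep.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     dep.dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
//     dep.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     dep.dstAccessMask = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
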
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkShaderModule *pShaderModule) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skip_call = VK_FALSE;
    if (!shader_is_spirv(pCreateInfo)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
    }

    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return res;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator,
                                                                  VkRenderPass *pRenderPass) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    // Create DAG
    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
    // Validate
    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    if (VK_FALSE != skip_call) {
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // TODOSC : Merge in tracking of renderpass from shader_checker
        // Shadow create info and store in map
        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
        if (pCreateInfo->pAttachments) {
            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
        }
        if (pCreateInfo->pSubpasses) {
            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));

            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
                const uint32_t attachmentCount = subpass->inputAttachmentCount +
                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];

                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
                subpass->pInputAttachments = attachments;
                attachments += subpass->inputAttachmentCount;

                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                subpass->pColorAttachments = attachments;
                attachments += subpass->colorAttachmentCount;

                if (subpass->pResolveAttachments) {
                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
                    subpass->pResolveAttachments = attachments;
                    attachments += subpass->colorAttachmentCount;
                }

                if (subpass->pDepthStencilAttachment) {
                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
                    subpass->pDepthStencilAttachment = attachments;
                    attachments += 1;
                }

                // pPreserveAttachments is an array of bare uint32_t indices, not
                // VkAttachmentReference structs, so copy it element-wise into the
                // tail of the block; a memcpy of reference-sized elements would
                // over-read the source and store the indices at the wrong stride.
                uint32_t *preserve = reinterpret_cast<uint32_t *>(attachments);
                for (uint32_t j = 0; j < subpass->preserveAttachmentCount; j++) {
                    preserve[j] = subpass->pPreserveAttachments[j];
                }
                subpass->pPreserveAttachments = preserve;
            }
        }
        if (pCreateInfo->pDependencies) {
            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
        }
        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
#if MTMERGESOURCE
        // MTMTODO : Merge with code from above to eliminate duplication
        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
            MT_PASS_ATTACHMENT_INFO pass_info;
            pass_info.load_op = desc.loadOp;
            pass_info.store_op = desc.storeOp;
            pass_info.attachment = i;
            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
        }
        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Color attachment %d cannot be greater than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Depth stencil attachment %d cannot be greater than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, false));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (attachment >= pCreateInfo->attachmentCount) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                         "Input attachment %d cannot be greater than the total number of attachments %d.",
                                         attachment, pCreateInfo->attachmentCount);
                    continue;
                }
                if (attachment_first_read.count(attachment))
                    continue;
                attachment_first_read.insert(std::make_pair(attachment, true));
                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
            }
        }
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
// Free the renderpass shadow
static void deleteRenderPasses(layer_data *my_data) {
    if (my_data->renderPassMap.empty())
        return;
    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
        delete[] pRenderPassInfo->pAttachments;
        if (pRenderPassInfo->pSubpasses) {
            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
                // Attachments are all allocated in a single block, so we only need
                //  to find the first non-null pointer to delete it
                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
                }
            }
            delete[] pRenderPassInfo->pSubpasses;
        }
        delete[] pRenderPassInfo->pDependencies;
        delete pRenderPassInfo;
        delete (*ii).second;
    }
    my_data->renderPassMap.clear();
}

VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto image_data = dev_data->imageViewMap.find(image_view);
        assert(image_data != dev_data->imageViewMap.end());
        const VkImage &image = image_data->second.image;
        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "You cannot start a render pass using attachment %i where the initial layout is %s and the "
                                "layout of the attachment at the start of the render pass is %s. The layouts must match.",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
    }
    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
    }
}

VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
    VkBool32 skip_call = VK_FALSE;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
                             cmd_name.c_str());
    }
    return skip_call;
}

void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}

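// Example (illustrative): for an 800x600 framebuffer, a renderArea with
// offset (0, 0) and extent 800x600 passes the bounds check above, while
// offset (100, 0) with the same extent fails because 100 + 800 > 800.
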
9681VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9682vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9683    VkBool32 skipCall = VK_FALSE;
9684    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9685    loader_platform_thread_lock_mutex(&globalLock);
9686    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9687    if (pCB) {
9688        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
9689#if MTMERGE
9690            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9691            if (pass_data != dev_data->renderPassMap.end()) {
9692                RENDER_PASS_NODE* pRPNode = pass_data->second;
9693                pRPNode->fb = pRenderPassBegin->framebuffer;
9694                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9695                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9696                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9697                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9698                        if (cb_data != dev_data->commandBufferMap.end()) {
9699                            std::function<VkBool32()> function = [=]() {
9700                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9701                                return VK_FALSE;
9702                            };
9703                            cb_data->second->validate_functions.push_back(function);
9704                        }
9705                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9706                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9707                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9708                            skipCall |=
9709                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9710                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9711                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9712                                        pRPNode->attachments[i].attachment, attachment_layout);
9713                        }
9714                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9715                        if (cb_data != dev_data->commandBufferMap.end()) {
9716                            std::function<VkBool32()> function = [=]() {
9717                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9718                                return VK_FALSE;
9719                            };
9720                            cb_data->second->validate_functions.push_back(function);
9721                        }
9722                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9723                        if (cb_data != dev_data->commandBufferMap.end()) {
9724                            std::function<VkBool32()> function = [=]() {
9725                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9726                            };
9727                            cb_data->second->validate_functions.push_back(function);
9728                        }
9729                    }
9730                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9731                        if (cb_data != dev_data->commandBufferMap.end()) {
9732                            std::function<VkBool32()> function = [=]() {
9733                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9734                            };
9735                            cb_data->second->validate_functions.push_back(function);
9736                        }
9737                    }
9738                }
9739            }
9740#endif
9741            skipCall |= static_cast<VkBool32>(VerifyRenderAreaBounds(dev_data, pRenderPassBegin));
9742            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9743            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9744            if (render_pass_data != dev_data->renderPassMap.end()) {
9745                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9746            }
9747            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9748            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9749            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9750            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9751            // This is a shallow copy as that is all that is needed for now
9752            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9753            pCB->activeSubpass = 0;
9754            pCB->activeSubpassContents = contents;
9755            pCB->framebuffer = pRenderPassBegin->framebuffer;
9756            // Connect this framebuffer to this cmdBuffer
9757            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9758        } else {
9759            skipCall |=
9760                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9761                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9762        }
9763    }
9764    loader_platform_thread_unlock_mutex(&globalLock);
9765    if (VK_FALSE == skipCall) {
9766        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9767        loader_platform_thread_lock_mutex(&globalLock);
9768        // This is a shallow copy as that is all that is needed for now
9769        dev_data->renderPassBeginInfo = *pRenderPassBegin;
9770        dev_data->currentSubpass = 0;
9771        loader_platform_thread_unlock_mutex(&globalLock);
9772    }
9773}
9774
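// Illustrative sketch (not part of this layer): the application-side call being validated
// above. With loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR the attachment contents become valid at
// vkCmdBeginRenderPass time, which is why a set_memory_valid() callback is queued for CLEAR
// attachments. All handles and dimensions below are hypothetical.
//
//     VkClearValue clear_value = {};
//     clear_value.color.float32[3] = 1.0f; // opaque black
//     VkRenderPassBeginInfo rp_begin = {};
//     rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
//     rp_begin.renderPass = render_pass;   // hypothetical VkRenderPass
//     rp_begin.framebuffer = framebuffer;  // hypothetical VkFramebuffer
//     rp_begin.renderArea.extent = {256, 256};
//     rp_begin.clearValueCount = 1;
//     rp_begin.pClearValues = &clear_value;
//     vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
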
9775VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9776    VkBool32 skipCall = VK_FALSE;
9777    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9778    loader_platform_thread_lock_mutex(&globalLock);
9779    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9780    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
9781    if (pCB) {
9782        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9783        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9784        pCB->activeSubpass++;
9785        pCB->activeSubpassContents = contents;
9786        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9787        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9788            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9789                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9790        }
9791        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9792    }
9793    loader_platform_thread_unlock_mutex(&globalLock);
9794    if (VK_FALSE == skipCall)
9795        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9796}
9797
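// Illustrative sketch (hypothetical handles, reusing rp_begin from the sketch above): the
// subpass sequence the two functions above track. activeSubpass is advanced once per
// vkCmdNextSubpass, so a render pass with N subpasses needs exactly N-1 such calls.
//
//     vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     // ... record draws for subpass 0 ...
//     vkCmdNextSubpass(cmd_buf, VK_SUBPASS_CONTENTS_INLINE);
//     // ... record draws for subpass 1 ...
//     vkCmdEndRenderPass(cmd_buf);
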
9798VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9799    VkBool32 skipCall = VK_FALSE;
9800    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9801    loader_platform_thread_lock_mutex(&globalLock);
9802#if MTMERGESOURCE
9803    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9804    if (cb_data != dev_data->commandBufferMap.end()) {
9805        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
9806        if (pass_data != dev_data->renderPassMap.end()) {
9807            RENDER_PASS_NODE* pRPNode = pass_data->second;
9808            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9809                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9810                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9811                    if (cb_data != dev_data->commandBufferMap.end()) {
9812                        std::function<VkBool32()> function = [=]() {
9813                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9814                            return VK_FALSE;
9815                        };
9816                        cb_data->second->validate_functions.push_back(function);
9817                    }
9818                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9819                    if (cb_data != dev_data->commandBufferMap.end()) {
9820                        std::function<VkBool32()> function = [=]() {
9821                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9822                            return VK_FALSE;
9823                        };
9824                        cb_data->second->validate_functions.push_back(function);
9825                    }
9826                }
9827            }
9828        }
9829    }
9830#endif
9831    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9832    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
9833    if (pCB) {
9834        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9835        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9836        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9837        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9838        pCB->activeRenderPass = 0;
9839        pCB->activeSubpass = 0;
9840    }
9841    loader_platform_thread_unlock_mutex(&globalLock);
9842    if (VK_FALSE == skipCall)
9843        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9844}
9845
9846bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9847                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
9848    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9849                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9850                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9851                   " that is not compatible with the current render pass %" PRIx64 ". "
9852                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9853                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9854                   msg);
9855}
9856
9857bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9858                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9859                                     uint32_t secondaryAttach, bool is_multi) {
9860    bool skip_call = false;
9861    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9862    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9863    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9864        primaryAttach = VK_ATTACHMENT_UNUSED;
9865    }
9866    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9867        secondaryAttach = VK_ATTACHMENT_UNUSED;
9868    }
9869    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9870        return skip_call;
9871    }
9872    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9873        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9874                                                 secondaryAttach, "The first is unused while the second is not.");
9875        return skip_call;
9876    }
9877    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9878        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9879                                                 secondaryAttach, "The second is unused while the first is not.");
9880        return skip_call;
9881    }
9882    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9883        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9884        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9885                                                 secondaryAttach, "They have different formats.");
9886    }
9887    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9888        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9889        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9890                                                 secondaryAttach, "They have different samples.");
9891    }
9892    if (is_multi &&
9893        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9894            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9895        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9896                                                 secondaryAttach, "They have different flags.");
9897    }
9898    return skip_call;
9899}
9900
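// Illustrative sketch: two attachment descriptions that the check above treats as
// compatible -- format and sample count match; flags are only compared when the render
// pass has multiple subpasses (is_multi). Load/store ops may differ freely.
//
//     VkAttachmentDescription a = {0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT,
//                                  VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_STORE,
//                                  VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
//                                  VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR};
//     VkAttachmentDescription b = a;
//     b.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; // still compatible: format/samples unchanged
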
9901bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9902                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
9903    bool skip_call = false;
9904    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9905    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9906    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9907    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9908    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9909    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9910        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9911        if (i < primary_desc.inputAttachmentCount) {
9912            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9913        }
9914        if (i < secondary_desc.inputAttachmentCount) {
9915            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9916        }
9917        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9918                                                     secondaryPass, secondary_input_attach, is_multi);
9919    }
9920    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9921    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9922        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9923        if (i < primary_desc.colorAttachmentCount) {
9924            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9925        }
9926        if (i < secondary_desc.colorAttachmentCount) {
9927            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9928        }
9929        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9930                                                     secondaryPass, secondary_color_attach, is_multi);
9931        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9932        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9933            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9934        }
9935        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9936            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9937        }
9938        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9939                                                     secondaryPass, secondary_resolve_attach, is_multi);
9940    }
9941    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9942    if (primary_desc.pDepthStencilAttachment) {
9943        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9944    }
9945    if (secondary_desc.pDepthStencilAttachment) {
9946        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9947    }
9948    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9949                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9950    return skip_call;
9951}
9952
9953bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9954                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9955    bool skip_call = false;
9956    // Early exit if renderPass objects are identical (and therefore compatible)
9957    if (primaryPass == secondaryPass)
9958        return skip_call;
9959    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9960    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9961    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9962        skip_call |=
9963            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9964                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9965                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9966                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9967        return skip_call;
9968    }
9969    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9970        skip_call |=
9971            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9972                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9973                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9974                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9975        return skip_call;
9976    }
9977    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9978        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9979                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9980                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9981                             " that is not compatible with the current render pass %" PRIx64 ". "
9982                             "They have a different number of subpasses.",
9983                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9984        return skip_call;
9985    }
9986    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9987    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9988        skip_call |=
9989            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9990    }
9991    return skip_call;
9992}
9993
9994bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9995                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9996    bool skip_call = false;
9997    if (!pSubCB->beginInfo.pInheritanceInfo) {
9998        return skip_call;
9999    }
10000    VkFramebuffer primary_fb = pCB->framebuffer;
10001    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10002    if (secondary_fb != VK_NULL_HANDLE) {
10003        if (primary_fb != secondary_fb) {
10004            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10005                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10006                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
10007                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
10008                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
10009        }
10010        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
10011        if (fb_data == dev_data->frameBufferMap.end()) {
10012            skip_call |=
10013                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10014                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
10015                                                                          "which has invalid framebuffer %" PRIx64 ".",
10016                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10017            return skip_call;
10018        }
10019        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
10020                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10021    }
10022    return skip_call;
10023}
10024
10025bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10026    bool skipCall = false;
10027    unordered_set<int> activeTypes;
10028    for (auto queryObject : pCB->activeQueries) {
10029        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10030        if (queryPoolData != dev_data->queryPoolMap.end()) {
10031            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10032                pSubCB->beginInfo.pInheritanceInfo) {
10033                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10034                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10035                    skipCall |= log_msg(
10036                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10037                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10038                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
10039                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics are being queried, so all "
10040                        "pipelineStatistics bits in the command buffer's inheritance info must also be set on the query pool.",
10041                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10042                }
10043            }
10044            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10045        }
10046    }
10047    for (auto queryObject : pSubCB->startedQueries) {
10048        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10049        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10050            skipCall |=
10051                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10052                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10053                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
10054                        "which has invalid active query pool %" PRIx64 " of type %d, but a query of that type has been started on "
10055                        "secondary Cmd Buffer %p.",
10056                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10057                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10058        }
10059    }
10060    return skipCall;
10061}
10062
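// Illustrative sketch (hypothetical handles): how a secondary command buffer declares the
// pipeline statistics it may be executed inside. The check above requires every bit set
// here to also be enabled on the active VK_QUERY_TYPE_PIPELINE_STATISTICS pool.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);
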
10063VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10064vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10065    VkBool32 skipCall = VK_FALSE;
10066    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10067    loader_platform_thread_lock_mutex(&globalLock);
10068    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10069    if (pCB) {
10070        GLOBAL_CB_NODE *pSubCB = NULL;
10071        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10072            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10073            if (!pSubCB) {
10074                skipCall |=
10075                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10076                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10077                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
10078                            (void *)pCommandBuffers[i], i);
                // Cannot validate state of, or record bindings for, an unknown command buffer; skip to the next element
                continue;
10079            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10080                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10081                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10082                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
10083                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
10084                                    (void *)pCommandBuffers[i], i);
10085            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10086                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10087                    skipCall |= log_msg(
10088                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10089                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10090                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
10091                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10092                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
10093                } else {
10094                    // Make sure render pass is compatible with parent command buffer pass if has continue
10095                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
10096                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
10097                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10098                }
10099                string errorString = "";
10100                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
10101                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
10102                    skipCall |= log_msg(
10103                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10104                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10105                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
10106                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
10107                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10108                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
10109                }
10110                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
10111                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
10112                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
10113                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
10114                        skipCall |= log_msg(
10115                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10116                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10117                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
10118                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
10119                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
10120                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
10121                    }
10122                }
10123            }
10124            // TODO(mlentine): Move more logic into this method
10125            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10126            skipCall |= validateCommandBufferState(dev_data, pSubCB);
10127            // Secondary cmdBuffers are considered pending execution from the time they are recorded
10129            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10130                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10131                    skipCall |= log_msg(
10132                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10133                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10134                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10135                        "set!",
10136                        (uint64_t)(pCB->commandBuffer));
10137                }
10138                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10139                    // Warn: executing a non-simultaneous secondary CB causes the primary CB to lose its simultaneous-use status
10140                    skipCall |= log_msg(
10141                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10142                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10143                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
10144                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10145                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10146                                          "set, even though it does.",
10147                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10148                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10149                }
10150            }
10151            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
10152                skipCall |=
10153                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10154                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10155                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10156                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
10157                            "flight and inherited queries not "
10158                            "supported on this device.",
10159                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10160            }
10161            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10162            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10163            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10164        }
10165        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
10166        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10167    }
10168    loader_platform_thread_unlock_mutex(&globalLock);
10169    if (VK_FALSE == skipCall)
10170        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10171}
10172
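// Illustrative sketch (hypothetical handles): executing a secondary command buffer inside
// a render pass. The secondary CB must be recorded with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and a compatible render pass, which is
// what the checks above enforce.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;  // must be compatible with the active render pass
//     inherit.framebuffer = framebuffer; // optional; VK_NULL_HANDLE is also allowed
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);
//     // ... record draws ...
//     vkEndCommandBuffer(secondary_cb);
//     vkCmdBeginRenderPass(primary_cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdExecuteCommands(primary_cb, 1, &secondary_cb);
//     vkCmdEndRenderPass(primary_cb);
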
10173VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
10174    VkBool32 skip_call = VK_FALSE;
10175    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10176    auto mem_data = dev_data->memObjMap.find(mem);
10177    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
10178        std::vector<VkImageLayout> layouts;
10179        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
10180            for (auto layout : layouts) {
10181                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10182                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10183                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10184                                                                                         "GENERAL or PREINITIALIZED are supported.",
10185                                         string_VkImageLayout(layout));
10186                }
10187            }
10188        }
10189    }
10190    return skip_call;
10191}
10192
10193VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10194vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10195    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10196
10197    VkBool32 skip_call = VK_FALSE;
10198    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10199    loader_platform_thread_lock_mutex(&globalLock);
10200#if MTMERGESOURCE
10201    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
10202    if (pMemObj) {
10203        pMemObj->valid = true;
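        // NOTE: memProps is assumed to be the device's cached VkPhysicalDeviceMemoryProperties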
10204        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10205            skip_call =
10206                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10207                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10208                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
10209        }
10210    }
10211    skip_call |= validateMemRange(dev_data, mem, offset, size);
10212    storeMemRanges(dev_data, mem, offset, size);
10213#endif
10214    skip_call |= ValidateMapImageLayouts(device, mem);
10215    loader_platform_thread_unlock_mutex(&globalLock);
10216
10217    if (VK_FALSE == skip_call) {
10218        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
10219#if MTMERGESOURCE
10220        loader_platform_thread_lock_mutex(&globalLock);
10221        initializeAndTrackMemory(dev_data, mem, size, ppData);
10222        loader_platform_thread_unlock_mutex(&globalLock);
10223#endif
10224    }
10225    return result;
10226}
10227
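// Illustrative sketch (hypothetical handles and data): the mapping pattern validated
// above. The memory must come from a type with VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set.
//
//     void *ptr = nullptr;
//     if (vkMapMemory(device, mem, /*offset*/ 0, VK_WHOLE_SIZE, 0, &ptr) == VK_SUCCESS) {
//         memcpy(ptr, src_data, src_size); // src_data/src_size are hypothetical
//         vkUnmapMemory(device, mem);
//     }
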
10228#if MTMERGESOURCE
10229VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
10230    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10231    VkBool32 skipCall = VK_FALSE;
10232
10233    loader_platform_thread_lock_mutex(&globalLock);
10234    skipCall |= deleteMemRanges(my_data, mem);
10235    loader_platform_thread_unlock_mutex(&globalLock);
10236    if (VK_FALSE == skipCall) {
10237        my_data->device_dispatch_table->UnmapMemory(device, mem);
10238    }
10239}
10240
10241VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10242                                const VkMappedMemoryRange *pMemRanges) {
10243    VkBool32 skipCall = VK_FALSE;
10244    for (uint32_t i = 0; i < memRangeCount; ++i) {
10245        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10246        if (mem_element != my_data->memObjMap.end()) {
10247            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
10248                skipCall |= log_msg(
10249                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10250                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10251                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10252                    "(" PRINTF_SIZE_T_SPECIFIER ").",
10253                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
10254            }
10255            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
10256                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
10257                 (pMemRanges[i].offset + pMemRanges[i].size))) {
10258                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10259                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10260                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10261                                                                 ") exceeds the Memory Object's upper-bound "
10262                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
10263                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10264                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
10265            }
10266        }
10267    }
10268    return skipCall;
10269}
10270
10271VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10272                                                  const VkMappedMemoryRange *pMemRanges) {
10273    VkBool32 skipCall = VK_FALSE;
10274    for (uint32_t i = 0; i < memRangeCount; ++i) {
10275        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10276        if (mem_element != my_data->memObjMap.end()) {
10277            if (mem_element->second.pData) {
10278                VkDeviceSize size = mem_element->second.memRange.size;
10279                VkDeviceSize half_size = (size / 2);
10280                char *data = static_cast<char *>(mem_element->second.pData);
10281                for (VkDeviceSize j = 0; j < half_size; ++j) {
10282                    if (data[j] != NoncoherentMemoryFillValue) {
10283                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10284                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10285                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10286                                            (uint64_t)pMemRanges[i].memory);
10287                    }
10288                }
10289                for (auto j = size + half_size; j < 2 * size; ++j) {
10290                    if (data[j] != NoncoherentMemoryFillValue) {
10291                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10292                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10293                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10294                                            (uint64_t)pMemRanges[i].memory);
10295                    }
10296                }
10297                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
10298            }
10299        }
10300    }
10301    return skipCall;
10302}
10303
10304VK_LAYER_EXPORT VkResult VKAPI_CALL
10305vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10306    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10307    VkBool32 skipCall = VK_FALSE;
10308    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10309
10310    loader_platform_thread_lock_mutex(&globalLock);
10311    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10312    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10313    loader_platform_thread_unlock_mutex(&globalLock);
10314    if (VK_FALSE == skipCall) {
10315        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10316    }
10317    return result;
10318}
10319
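// Illustrative sketch (hypothetical handle): flushing writes to non-coherent memory. Each
// VkMappedMemoryRange must lie inside the currently mapped range, which is what
// validateMemoryIsMapped() checks above.
//
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = mem;
//     range.offset = 0;
//     range.size = VK_WHOLE_SIZE;
//     vkFlushMappedMemoryRanges(device, 1, &range);
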
10320VK_LAYER_EXPORT VkResult VKAPI_CALL
10321vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10322    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10323    VkBool32 skipCall = VK_FALSE;
10324    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10325
10326    loader_platform_thread_lock_mutex(&globalLock);
10327    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10328    loader_platform_thread_unlock_mutex(&globalLock);
10329    if (VK_FALSE == skipCall) {
10330        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10331    }
10332    return result;
10333}
10334#endif
10335
10336VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10337    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10338    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10339    VkBool32 skipCall = VK_FALSE;
10340#if MTMERGESOURCE
10341    loader_platform_thread_lock_mutex(&globalLock);
10342    // Track objects tied to memory
10343    uint64_t image_handle = (uint64_t)(image);
10344    skipCall =
10345        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10346    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
10347    {
10348        VkMemoryRequirements memRequirements;
10349        // Call down the dispatch table directly rather than re-entering the layer chain via vkGetImageMemoryRequirements()
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10350        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
10351                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
10352                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
10353    }
10354    print_mem_list(dev_data, device);
10355    loader_platform_thread_unlock_mutex(&globalLock);
10356#endif
10357    if (VK_FALSE == skipCall) {
10358        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10359        VkMemoryRequirements memRequirements;
10360        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10361        loader_platform_thread_lock_mutex(&globalLock);
10362        dev_data->memObjMap[mem].image = image;
10363        dev_data->imageMap[image].mem = mem;
10364        dev_data->imageMap[image].memOffset = memoryOffset;
10365        dev_data->imageMap[image].memSize = memRequirements.size;
10366        loader_platform_thread_unlock_mutex(&globalLock);
10367    }
10368    return result;
10369}
10370
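// Illustrative sketch (hypothetical handles): the usual allocate-then-bind sequence this
// entry point validates. Selection of memory_type_index is elided since it depends on the
// device's memory properties and reqs.memoryTypeBits.
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     VkMemoryAllocateInfo alloc = {};
//     alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
//     alloc.allocationSize = reqs.size;
//     alloc.memoryTypeIndex = memory_type_index; // hypothetical, chosen from reqs.memoryTypeBits
//     VkDeviceMemory mem;
//     vkAllocateMemory(device, &alloc, nullptr, &mem);
//     vkBindImageMemory(device, image, mem, /*memoryOffset*/ 0);
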
10371VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
10372    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10373    loader_platform_thread_lock_mutex(&globalLock);
10374    dev_data->eventMap[event].needsSignaled = false;
10375    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10376    loader_platform_thread_unlock_mutex(&globalLock);
10377    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
10378    return result;
10379}
10380
10381VKAPI_ATTR VkResult VKAPI_CALL
10382vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10383    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10384    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10385    VkBool32 skip_call = VK_FALSE;
10386#if MTMERGESOURCE
10387    //MTMTODO : Merge this code with the checks below
10388    loader_platform_thread_lock_mutex(&globalLock);
10389
10390    for (uint32_t i = 0; i < bindInfoCount; i++) {
10391        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
10392        // Track objects tied to memory
10393        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
10394            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
10395                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
10396                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10397                                           "vkQueueBindSparse"))
10398                    skip_call = VK_TRUE;
10399            }
10400        }
10401        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
10402            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
10403                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
10404                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10405                                           "vkQueueBindSparse"))
10406                    skip_call = VK_TRUE;
10407            }
10408        }
10409        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
10410            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
10411                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
10412                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10413                                           "vkQueueBindSparse"))
10414                    skip_call = VK_TRUE;
10415            }
10416        }
10417        // Validate semaphore state
10418        for (uint32_t j = 0; j < bindInfo->waitSemaphoreCount; j++) {
10419            VkSemaphore sem = bindInfo->pWaitSemaphores[j];
10420
10421            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10422                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
10423                    skip_call =
10424                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10425                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10426                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
10427                }
10428                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
10429            }
10430        }
10431        for (uint32_t j = 0; j < bindInfo->signalSemaphoreCount; j++) {
10432            VkSemaphore sem = bindInfo->pSignalSemaphores[j];
10433
10434            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10435                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10436                    skip_call =
10437                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10438                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10439                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
10440                }
10441                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10442            }
10443        }
10444    }
10445
10446    print_mem_list(dev_data, queue);
10447    loader_platform_thread_unlock_mutex(&globalLock);
10448#endif
10449    loader_platform_thread_lock_mutex(&globalLock);
10450    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10451        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10452        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10453            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
10454                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
10455            } else {
10456                skip_call |=
10457                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10458                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10459                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10460                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
10461            }
10462        }
10463        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10464            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
10465        }
10466    }
10467    loader_platform_thread_unlock_mutex(&globalLock);
10468
10469    if (VK_FALSE == skip_call)
10470        result = dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10471#if MTMERGESOURCE
10472    // Update semaphore state
10473    loader_platform_thread_lock_mutex(&globalLock);
10474    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
10475        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
10476        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
10477            VkSemaphore sem = bindInfo->pWaitSemaphores[i];
10478
10479            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10480                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10481            }
10482        }
10483    }
10484    loader_platform_thread_unlock_mutex(&globalLock);
10485#endif
10486
10487    return result;
10488}
10489
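// Illustrative sketch (hypothetical handles): an opaque sparse bind of the kind tracked
// above via set_sparse_mem_binding(). Semaphore wait/signal arrays are omitted.
//
//     VkSparseMemoryBind bind = {};
//     bind.resourceOffset = 0;
//     bind.size = reqs.size;      // from vkGetImageMemoryRequirements()
//     bind.memory = mem;          // hypothetical VkDeviceMemory
//     VkSparseImageOpaqueMemoryBindInfo opaque = {sparse_image, 1, &bind};
//     VkBindSparseInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
//     info.imageOpaqueBindCount = 1;
//     info.pImageOpaqueBinds = &opaque;
//     vkQueueBindSparse(queue, 1, &info, VK_NULL_HANDLE);
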
10490VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10491                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10492    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10493    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10494    if (result == VK_SUCCESS) {
10495        loader_platform_thread_lock_mutex(&globalLock);
10496        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10497        sNode->signaled = 0;
10498        sNode->queue = VK_NULL_HANDLE;
10499        sNode->in_use.store(0);
10500        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10501        loader_platform_thread_unlock_mutex(&globalLock);
10502    }
10503    return result;
10504}
10505
10506VKAPI_ATTR VkResult VKAPI_CALL
10507vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10508    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10509    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10510    if (result == VK_SUCCESS) {
10511        loader_platform_thread_lock_mutex(&globalLock);
10512        dev_data->eventMap[*pEvent].needsSignaled = false;
10513        dev_data->eventMap[*pEvent].in_use.store(0);
10514        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10515        loader_platform_thread_unlock_mutex(&globalLock);
10516    }
10517    return result;
10518}
10519
10520VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10521                                                                    const VkAllocationCallbacks *pAllocator,
10522                                                                    VkSwapchainKHR *pSwapchain) {
10523    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10524    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10525
10526    if (VK_SUCCESS == result) {
10527        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10528        loader_platform_thread_lock_mutex(&globalLock);
10529        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10530        loader_platform_thread_unlock_mutex(&globalLock);
10531    }
10532
10533    return result;
10534}
10535
10536VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10537vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10538    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10539    bool skipCall = false;
10540
10541    loader_platform_thread_lock_mutex(&globalLock);
10542    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10543    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10544        if (swapchain_data->second->images.size() > 0) {
10545            for (auto swapchain_image : swapchain_data->second->images) {
10546                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10547                if (image_sub != dev_data->imageSubresourceMap.end()) {
10548                    for (auto imgsubpair : image_sub->second) {
10549                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10550                        if (image_item != dev_data->imageLayoutMap.end()) {
10551                            dev_data->imageLayoutMap.erase(image_item);
10552                        }
10553                    }
10554                    dev_data->imageSubresourceMap.erase(image_sub);
10555                }
10556#if MTMERGESOURCE
10557                skipCall = clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
10558                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10559                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
10560#endif
10561            }
10562        }
10563        delete swapchain_data->second;
10564        dev_data->device_extensions.swapchainMap.erase(swapchain);
10565    }
10566    loader_platform_thread_unlock_mutex(&globalLock);
10567    if (!skipCall)
10568        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10569}
10570
10571VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10572vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10573    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10574    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10575
10576    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10577        // This should never happen and is checked by param checker.
10578        if (!pCount)
10579            return result;
10580        loader_platform_thread_lock_mutex(&globalLock);
10581        const size_t count = *pCount;
10582        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10583        if (!swapchain_node->images.empty()) {
10584            // TODO : Not sure I like the memcmp here, but it works
10585            const bool mismatch = (swapchain_node->images.size() != count ||
10586                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10587            if (mismatch) {
10588                // TODO: Verify against Valid Usage section of extension
10589                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10590                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10591                        "vkGetSwapchainImagesKHR(swapchain %" PRIu64
10592                        ") returned mismatching image data on a subsequent call",
10593                        (uint64_t)(swapchain));
10594            }
10595        }
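        // NOTE: a repeat call re-appends to swapchain_node->images and imageSubresourceMap, leaving duplicate entries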
10596        for (uint32_t i = 0; i < *pCount; ++i) {
10597            IMAGE_LAYOUT_NODE image_layout_node;
10598            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10599            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10600            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
10601            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10602            swapchain_node->images.push_back(pSwapchainImages[i]);
10603            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10604            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10605            dev_data->imageLayoutMap[subpair] = image_layout_node;
10606            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10607        }
10608        if (!swapchain_node->images.empty()) {
10609            for (auto image : swapchain_node->images) {
10610                // Add image object binding, then insert the new Mem Object and then bind it to created image
10611#if MTMERGESOURCE
10612                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10613                                       &swapchain_node->createInfo);
10614#endif
10615            }
10616        }
10617        loader_platform_thread_unlock_mutex(&globalLock);
10618    }
10619    return result;
10620}
10621
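// Illustrative sketch (not part of the layer): the standard two-call idiom an application
// uses to drive vkGetSwapchainImagesKHR, which the intercept above services. The handles
// `app_device` and `app_swapchain` are hypothetical.
#if 0
static std::vector<VkImage> example_get_swapchain_images(VkDevice app_device, VkSwapchainKHR app_swapchain) {
    uint32_t image_count = 0;
    // First call with a NULL image pointer queries only the count
    vkGetSwapchainImagesKHR(app_device, app_swapchain, &image_count, NULL);
    std::vector<VkImage> images(image_count);
    // Second call fills the array; the layer records layout/binding state for each image
    vkGetSwapchainImagesKHR(app_device, app_swapchain, &image_count, images.data());
    return images;
}
#endif
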
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but are in %s",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
#if MTMERGESOURCE
    // pPresentInfo may be NULL; only touch semaphore state when it was provided
    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

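// Illustrative sketch (not part of the layer): a minimal vkQueuePresentKHR call showing
// the fields the intercept above validates -- each wait semaphore must be signaled and
// each presented image must already be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. All handles
// here are hypothetical.
#if 0
static VkResult example_present(VkQueue present_queue, VkSwapchainKHR app_swapchain,
                                uint32_t image_index, VkSemaphore render_done) {
    VkPresentInfoKHR present_info = {};
    present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present_info.waitSemaphoreCount = 1;       // checked against semaphoreMap[].signaled
    present_info.pWaitSemaphores = &render_done;
    present_info.swapchainCount = 1;
    present_info.pSwapchains = &app_swapchain;
    present_info.pImageIndices = &image_index; // checked against swapchain_data->images.size()
    return vkQueuePresentKHR(present_queue, &present_info);
}
#endif
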
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    if (semaphore != VK_NULL_HANDLE &&
        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
        dev_data->semaphoreMap[semaphore].in_use.fetch_add(1);
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }
    loader_platform_thread_lock_mutex(&globalLock);
    // FIXME/TODO: Need to add code to track the "fence" parameter
    if (result == VK_SUCCESS && semaphore != VK_NULL_HANDLE) {
        dev_data->semaphoreMap[semaphore].signaled = 1;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    return result;
}

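// Illustrative sketch (not part of the layer): how acquire pairs with present in a frame
// loop. The intercept above marks `acquire_done` signaled so the wait validated in
// vkQueuePresentKHR (or vkQueueSubmit) can succeed. Handles are hypothetical.
#if 0
static void example_acquire(VkDevice app_device, VkSwapchainKHR app_swapchain,
                            VkSemaphore acquire_done, uint32_t *image_index) {
    // UINT64_MAX blocks until an image is available; the semaphore is signaled once
    // the presentation engine releases the image to the application
    vkAcquireNextImageKHR(app_device, app_swapchain, UINT64_MAX, acquire_done,
                          VK_NULL_HANDLE, image_index);
}
#endif
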
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        loader_platform_thread_lock_mutex(&globalLock);
        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return res;
}

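// Illustrative sketch (not part of the layer): registering a debug report callback against
// this layer. The callback function and handles are hypothetical; an application would
// normally fetch the entry point through vkGetInstanceProcAddr since it is an extension.
#if 0
static VKAPI_ATTR VkBool32 VKAPI_CALL example_report(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType,
                                                     uint64_t object, size_t location, int32_t msgCode,
                                                     const char *pLayerPrefix, const char *pMsg, void *pUserData) {
    LOGCONSOLE("[%s] %s\n", pLayerPrefix, pMsg);
    return VK_FALSE; // do not abort the Vulkan call that triggered the report
}

static VkResult example_register_callback(VkInstance app_instance, VkDebugReportCallbackEXT *callback) {
    VkDebugReportCallbackCreateInfoEXT info = {};
    info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
    info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
    info.pfnCallback = example_report;
    return vkCreateDebugReportCallbackEXT(app_instance, &info, NULL, callback);
}
#endif
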
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                                                           VkDebugReportCallbackEXT msgCallback,
                                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkDestroyDevice"))
        return (PFN_vkVoidFunction)vkDestroyDevice;
    if (!strcmp(funcName, "vkQueueSubmit"))
        return (PFN_vkVoidFunction)vkQueueSubmit;
    if (!strcmp(funcName, "vkWaitForFences"))
        return (PFN_vkVoidFunction)vkWaitForFences;
    if (!strcmp(funcName, "vkGetFenceStatus"))
        return (PFN_vkVoidFunction)vkGetFenceStatus;
    if (!strcmp(funcName, "vkQueueWaitIdle"))
        return (PFN_vkVoidFunction)vkQueueWaitIdle;
    if (!strcmp(funcName, "vkDeviceWaitIdle"))
        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
    if (!strcmp(funcName, "vkGetDeviceQueue"))
        return (PFN_vkVoidFunction)vkGetDeviceQueue;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkDestroyFence"))
        return (PFN_vkVoidFunction)vkDestroyFence;
    if (!strcmp(funcName, "vkResetFences"))
        return (PFN_vkVoidFunction)vkResetFences;
    if (!strcmp(funcName, "vkDestroySemaphore"))
        return (PFN_vkVoidFunction)vkDestroySemaphore;
    if (!strcmp(funcName, "vkDestroyEvent"))
        return (PFN_vkVoidFunction)vkDestroyEvent;
    if (!strcmp(funcName, "vkDestroyQueryPool"))
        return (PFN_vkVoidFunction)vkDestroyQueryPool;
    if (!strcmp(funcName, "vkDestroyBuffer"))
        return (PFN_vkVoidFunction)vkDestroyBuffer;
    if (!strcmp(funcName, "vkDestroyBufferView"))
        return (PFN_vkVoidFunction)vkDestroyBufferView;
    if (!strcmp(funcName, "vkDestroyImage"))
        return (PFN_vkVoidFunction)vkDestroyImage;
    if (!strcmp(funcName, "vkDestroyImageView"))
        return (PFN_vkVoidFunction)vkDestroyImageView;
    if (!strcmp(funcName, "vkDestroyShaderModule"))
        return (PFN_vkVoidFunction)vkDestroyShaderModule;
    if (!strcmp(funcName, "vkDestroyPipeline"))
        return (PFN_vkVoidFunction)vkDestroyPipeline;
    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
    if (!strcmp(funcName, "vkDestroySampler"))
        return (PFN_vkVoidFunction)vkDestroySampler;
    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
    if (!strcmp(funcName, "vkDestroyFramebuffer"))
        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
    if (!strcmp(funcName, "vkDestroyRenderPass"))
        return (PFN_vkVoidFunction)vkDestroyRenderPass;
    if (!strcmp(funcName, "vkCreateBuffer"))
        return (PFN_vkVoidFunction)vkCreateBuffer;
    if (!strcmp(funcName, "vkCreateBufferView"))
        return (PFN_vkVoidFunction)vkCreateBufferView;
    if (!strcmp(funcName, "vkCreateImage"))
        return (PFN_vkVoidFunction)vkCreateImage;
    if (!strcmp(funcName, "vkCreateImageView"))
        return (PFN_vkVoidFunction)vkCreateImageView;
    if (!strcmp(funcName, "vkCreateFence"))
        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
    if (!strcmp(funcName, "vkCreateComputePipelines"))
        return (PFN_vkVoidFunction)vkCreateComputePipelines;
    if (!strcmp(funcName, "vkCreateSampler"))
        return (PFN_vkVoidFunction)vkCreateSampler;
    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
    if (!strcmp(funcName, "vkCreatePipelineLayout"))
        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
    if (!strcmp(funcName, "vkCreateDescriptorPool"))
        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
    if (!strcmp(funcName, "vkResetDescriptorPool"))
        return (PFN_vkVoidFunction)vkResetDescriptorPool;
    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
    if (!strcmp(funcName, "vkFreeDescriptorSets"))
        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
    if (!strcmp(funcName, "vkCreateCommandPool"))
        return (PFN_vkVoidFunction)vkCreateCommandPool;
    if (!strcmp(funcName, "vkDestroyCommandPool"))
        return (PFN_vkVoidFunction)vkDestroyCommandPool;
    if (!strcmp(funcName, "vkResetCommandPool"))
        return (PFN_vkVoidFunction)vkResetCommandPool;
    if (!strcmp(funcName, "vkCreateQueryPool"))
        return (PFN_vkVoidFunction)vkCreateQueryPool;
    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
    if (!strcmp(funcName, "vkFreeCommandBuffers"))
        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
    if (!strcmp(funcName, "vkBeginCommandBuffer"))
        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
    if (!strcmp(funcName, "vkEndCommandBuffer"))
        return (PFN_vkVoidFunction)vkEndCommandBuffer;
    if (!strcmp(funcName, "vkResetCommandBuffer"))
        return (PFN_vkVoidFunction)vkResetCommandBuffer;
    if (!strcmp(funcName, "vkCmdBindPipeline"))
        return (PFN_vkVoidFunction)vkCmdBindPipeline;
    if (!strcmp(funcName, "vkCmdSetViewport"))
        return (PFN_vkVoidFunction)vkCmdSetViewport;
    if (!strcmp(funcName, "vkCmdSetScissor"))
        return (PFN_vkVoidFunction)vkCmdSetScissor;
    if (!strcmp(funcName, "vkCmdSetLineWidth"))
        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
    if (!strcmp(funcName, "vkCmdSetDepthBias"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;

    if (dev == NULL)
        return NULL;

    layer_data *dev_data;
    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

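// Illustrative sketch (not part of the layer): how the loader (or an application chaining
// layers manually) resolves a device entry point through the hook above. Names the layer
// intercepts return the layer's own function; anything else falls through to the next
// layer's GetDeviceProcAddr. The device handle is hypothetical.
#if 0
static void example_resolve_device_proc(VkDevice app_device) {
    PFN_vkCmdDraw pfn_draw =
        (PFN_vkCmdDraw)vkGetDeviceProcAddr(app_device, "vkCmdDraw"); // returns this layer's hook
    (void)pfn_draw;
}
#endif
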
VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
#endif
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;

    if (instance == NULL)
        return NULL;

    PFN_vkVoidFunction fptr;

    layer_data *my_data;
    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}

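// Illustrative sketch (not part of the layer): instance-level resolution. Extension entry
// points such as vkCreateDebugReportCallbackEXT have no static loader trampoline and must
// be fetched this way before use; here they are served by the debug_report lookup above.
// The instance handle is hypothetical.
#if 0
static PFN_vkCreateDebugReportCallbackEXT example_resolve_instance_ext(VkInstance app_instance) {
    return (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(app_instance, "vkCreateDebugReportCallbackEXT");
}
#endif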