core_validation.cpp revision 7c755c8aca6857046df9516d8336416165969cb9
1/* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
4 * Copyright (C) 2015-2016 Google Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and/or associated documentation files (the "Materials"), to
8 * deal in the Materials without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Materials, and to permit persons to whom the Materials
11 * are furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice(s) and this permission notice shall be included
14 * in all copies or substantial portions of the Materials.
15 *
16 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 *
20 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
21 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
23 * USE OR OTHER DEALINGS IN THE MATERIALS
24 *
25 * Author: Cody Northrop <cnorthrop@google.com>
26 * Author: Michael Lentine <mlentine@google.com>
27 * Author: Tobin Ehlis <tobine@google.com>
28 * Author: Chia-I Wu <olv@google.com>
29 * Author: Chris Forbes <chrisf@ijw.co.nz>
30 * Author: Mark Lobodzinski <mark@lunarg.com>
31 * Author: Ian Elliott <ianelliott@google.com>
32 */
33
34// Allow use of STL min and max functions in Windows
35#define NOMINMAX
36
37// Turn on mem_tracker merged code
38#define MTMERGESOURCE 1
39
40#include <stdio.h>
41#include <stdlib.h>
42#include <string.h>
43#include <assert.h>
44#include <unordered_map>
45#include <unordered_set>
46#include <map>
47#include <string>
48#include <iostream>
49#include <algorithm>
50#include <list>
51#include <SPIRV/spirv.hpp>
52#include <set>
53
54#include "vk_loader_platform.h"
55#include "vk_dispatch_table_helper.h"
56#include "vk_struct_string_helper_cpp.h"
57#if defined(__GNUC__)
58#pragma GCC diagnostic ignored "-Wwrite-strings"
59#endif
60#if defined(__GNUC__)
61#pragma GCC diagnostic warning "-Wwrite-strings"
62#endif
63#include "vk_struct_size_helper.h"
64#include "core_validation.h"
65#include "vk_layer_config.h"
66#include "vk_layer_table.h"
67#include "vk_layer_data.h"
68#include "vk_layer_logging.h"
69#include "vk_layer_extension_utils.h"
70#include "vk_layer_utils.h"
71
72#if defined __ANDROID__
73#include <android/log.h>
74#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
75#else
76#define LOGCONSOLE(...) printf(__VA_ARGS__)
77#endif
78
79using std::unordered_map;
80using std::unordered_set;
81
#if MTMERGESOURCE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
// Sentinel VkDeviceMemory handle (all bits set) that cannot collide with a real
// allocation; swapchain images are bound to this key in the tracking maps.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
// Per-VkCommandPool state: creation parameters plus every command buffer
// currently allocated from the pool.
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags; // flags the pool was created with
    uint32_t queueFamilyIndex;            // queue family the pool was created for
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};
93
// Per-device extension state: whether WSI (swapchain) support is enabled plus
// bookkeeping maps for swapchains and the images they own.
struct devExts {
    VkBool32 wsi_enabled;                                        // VK_KHR_swapchain enabled on this device
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap; // swapchain handle -> tracking node
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;   // swapchain image -> owning swapchain
};
99
100// fwd decls
101struct shader_module;
102
// Per-instance / per-device layer state, stored in layer_data_map keyed by
// dispatch key. Holds the dispatch tables plus every tracking map this layer
// maintains (memory, images, buffers, descriptors, command buffers, ...).
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGESOURCE
// MTMERGESOURCE - stuff pulled directly from MT
    // Monotonically increasing id handed out per submission by add_fence_info;
    // initialized to 1 in the constructor below.
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
// MTMERGESOURCE - End of MT stuff
#endif
    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap; // NOTE(review): name looks like a typo for "samplerMap"
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
// MTMERGESOURCE - added a couple of fields to constructor initializer
    // Pointers default to null; the tracking maps default-construct empty.
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGESOURCE
        currentFenceId(1),
#endif
        device_extensions(){};
};
161
// Layer self-description (name, spec version, implementation version,
// description) reported to the loader via the enumeration entry points.
static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};
165
166template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
167    bool foundLayer = false;
168    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
169        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
170            foundLayer = true;
171        }
172        // This has to be logged to console as we don't have a callback at this point.
173        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
174            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
175                       cv_global_layers[0].layerName);
176        }
177    }
178}
179
180// Code imported from shader_checker
181static void build_def_index(shader_module *);
182
// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero; // start of the module's word stream (for offset())
    std::vector<uint32_t>::const_iterator it;   // first word of the current instruction

    // High 16 bits of the first word hold the instruction's total word count.
    uint32_t len() const { return *it >> 16; }
    // Low 16 bits of the first word hold the opcode.
    uint32_t opcode() const { return *it & 0x0ffffu; }
    // n-th word of the current instruction (word(0) is the len/opcode word).
    uint32_t const &word(unsigned n) const { return it[n]; }
    // Word offset of this instruction from the start of the module.
    uint32_t offset() const { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    // Accessors and comparisons are const-qualified so the iterator can be used
    // through const references (they do not modify the iterator).
    bool operator==(spirv_inst_iter const &other) const { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) const { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    // Pre-increment returns *this by reference (idiomatic; avoids a copy).
    spirv_inst_iter &operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
217
struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    // Copies the caller's SPIR-V words (codeSize is in bytes) and builds the
    // id -> definition-offset index via build_def_index().
    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    // The first 5 words are the SPIR-V module header, so begin() starts at the
    // first real instruction.
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    // Returns end() when the id has no recorded definition.
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
248
// TODO : Do we need to guard access to layer_data_map w/ lock?
// Maps a dispatchable object's dispatch key to its per-instance/per-device state.
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
// Single coarse-grained lock serializing access to the layer's global state.
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
// Cached physical-device memory properties (populated elsewhere -- not visible
// in this portion of the file).
static VkPhysicalDeviceMemoryProperties memProps;

// Forward declaration: drops all mem-object references held by a command buffer.
static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF
262
263static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
264    MT_OBJ_BINDING_INFO *retValue = NULL;
265    switch (type) {
266    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
267        auto it = my_data->imageBindingMap.find(handle);
268        if (it != my_data->imageBindingMap.end())
269            return &(*it).second;
270        break;
271    }
272    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
273        auto it = my_data->bufferBindingMap.find(handle);
274        if (it != my_data->bufferBindingMap.end())
275            return &(*it).second;
276        break;
277    }
278    default:
279        break;
280    }
281    return retValue;
282}
283// MTMERGESOURCE - end section
284#endif
// Explicit instantiation of the templated per-dispatch-key state lookup for layer_data.
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
// Forward declaration: fetch the tracking node for a command buffer (NULL if unknown).
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);
289
290#if MTMERGESOURCE
291static void delete_queue_info_list(layer_data *my_data) {
292    // Process queue list, cleaning up each entry before deleting
293    my_data->queueMap.clear();
294}
295
296// Delete CBInfo from container and clear mem references to CB
297static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
298    clear_cmd_buf_and_mem_references(my_data, cb);
299    // Delete the CBInfo info
300    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
301    my_data->commandBufferMap.erase(cb);
302}
303
304static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
305                                    const VkDeviceMemory mem) {
306    switch (type) {
307    // Buffers and images are unique as their CreateInfo is in container struct
308    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
309        auto pCI = &my_data->bufferBindingMap[handle];
310        pCI->mem = mem;
311        break;
312    }
313    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
314        auto pCI = &my_data->imageBindingMap[handle];
315        pCI->mem = mem;
316        break;
317    }
318    default:
319        break;
320    }
321}
322
323static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
324                                   const void *pCreateInfo) {
325    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
326    switch (type) {
327    // Buffers and images are unique as their CreateInfo is in container struct
328    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
329        auto pCI = &my_data->bufferBindingMap[handle];
330        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
331        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
332        break;
333    }
334    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
335        auto pCI = &my_data->imageBindingMap[handle];
336        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
337        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
338        break;
339    }
340    // Swap Chain is very unique, use my_data->imageBindingMap, but copy in
341    // SwapChainCreatInfo's usage flags and set the mem value to a unique key. These is used by
342    // vkCreateImageView and internal mem_tracker routines to distinguish swap chain images
343    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
344        auto pCI = &my_data->imageBindingMap[handle];
345        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
346        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
347        pCI->valid = false;
348        pCI->create_info.image.usage =
349            const_cast<VkSwapchainCreateInfoKHR *>(static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo))->imageUsage;
350        break;
351    }
352    default:
353        break;
354    }
355}
356
// Add a fence, creating one if necessary to our list of fences/fenceIds
// Assigns the next monotonically increasing fenceId to this submission,
// records fence->queue tracking state for a real (non-null) fence, flags
// fences submitted while already SIGNALED, and updates the queue's
// lastSubmittedId. Returns VK_TRUE when validation wants the call skipped.
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    // Every submission consumes an id, whether or not a fence was provided.
    *fenceId = my_data->currentFenceId++;

    // Track the externally supplied fence. (The original comment claimed an
    // internal fence is created when none is supplied; none is -- see the TODO
    // in the else branch.)
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        // Validate that fence is in UNSIGNALED state
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}
381
382// Remove a fenceInfo from our list of fences/fenceIds
383static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }
384
385// Record information when a fence is known to be signalled
386static void update_fence_tracking(layer_data *my_data, VkFence fence) {
387    auto fence_item = my_data->fenceMap.find(fence);
388    if (fence_item != my_data->fenceMap.end()) {
389        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
390        VkQueue queue = pCurFenceInfo->queue;
391        auto queue_item = my_data->queueMap.find(queue);
392        if (queue_item != my_data->queueMap.end()) {
393            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
394            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
395                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
396            }
397        }
398    }
399
400    // Update fence state in fenceCreateInfo structure
401    auto pFCI = &(my_data->fenceMap[fence].createInfo);
402    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
403}
404
405// Helper routine that updates the fence list for a specific queue to all-retired
406static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
407    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
408    // Set queue's lastRetired to lastSubmitted indicating all fences completed
409    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
410}
411
412// Helper routine that updates all queues to all-retired
413static void retire_device_fences(layer_data *my_data, VkDevice device) {
414    // Process each queue for device
415    // TODO: Add multiple device support
416    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
417        // Set queue's lastRetired to lastSubmitted indicating all fences completed
418        QUEUE_NODE *pQueueInfo = &(*ii).second;
419        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
420    }
421}
422
423// Helper function to validate correct usage bits set for buffers or images
424//  Verify that (actual & desired) flags != 0 or,
425//   if strict is true, verify that (actual & desired) flags == desired
426//  In case of error, report it via dbg callbacks
427static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
428                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
429                                     char const *func_name, char const *usage_str) {
430    VkBool32 correct_usage = VK_FALSE;
431    VkBool32 skipCall = VK_FALSE;
432    if (strict)
433        correct_usage = ((actual & desired) == desired);
434    else
435        correct_usage = ((actual & desired) != 0);
436    if (!correct_usage) {
437        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
438                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
439                                                               " used by %s. In this case, %s should have %s set during creation.",
440                           ty_str, obj_handle, func_name, ty_str, usage_str);
441    }
442    return skipCall;
443}
444
445// Helper function to validate usage flags for images
446// Pulls image info and then sends actual vs. desired usage off to helper above where
447//  an error will be flagged if usage is not correct
448static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
449                                           char const *func_name, char const *usage_string) {
450    VkBool32 skipCall = VK_FALSE;
451    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
452    if (pBindInfo) {
453        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
454                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
455    }
456    return skipCall;
457}
458
459// Helper function to validate usage flags for buffers
460// Pulls buffer info and then sends actual vs. desired usage off to helper above where
461//  an error will be flagged if usage is not correct
462static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
463                                            char const *func_name, char const *usage_string) {
464    VkBool32 skipCall = VK_FALSE;
465    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
466    if (pBindInfo) {
467        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
468                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
469    }
470    return skipCall;
471}
472
473// Return ptr to info in map container containing mem, or NULL if not found
474//  Calls to this function should be wrapped in mutex
475static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
476    auto item = dev_data->memObjMap.find(mem);
477    if (item != dev_data->memObjMap.end()) {
478        return &(*item).second;
479    } else {
480        return NULL;
481    }
482}
483
484static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
485                             const VkMemoryAllocateInfo *pAllocateInfo) {
486    assert(object != NULL);
487
488    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
489    // TODO:  Update for real hardware, actually process allocation info structures
490    my_data->memObjMap[mem].allocInfo.pNext = NULL;
491    my_data->memObjMap[mem].object = object;
492    my_data->memObjMap[mem].mem = mem;
493    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
494    my_data->memObjMap[mem].memRange.offset = 0;
495    my_data->memObjMap[mem].memRange.size = 0;
496    my_data->memObjMap[mem].pData = 0;
497    my_data->memObjMap[mem].pDriverData = 0;
498    my_data->memObjMap[mem].valid = false;
499}
500
501static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
502                                         VkImage image = VK_NULL_HANDLE) {
503    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
504        MT_OBJ_BINDING_INFO *pBindInfo =
505            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
506        if (pBindInfo && !pBindInfo->valid) {
507            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
508                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
509                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
510                           functionName, (uint64_t)(image));
511        }
512    } else {
513        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
514        if (pMemObj && !pMemObj->valid) {
515            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
516                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
517                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
518                           (uint64_t)(mem));
519        }
520    }
521    return false;
522}
523
524static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
525    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
526        MT_OBJ_BINDING_INFO *pBindInfo =
527            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
528        if (pBindInfo) {
529            pBindInfo->valid = valid;
530        }
531    } else {
532        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
533        if (pMemObj) {
534            pMemObj->valid = valid;
535        }
536    }
537}
538
539// Find CB Info and add mem reference to list container
540// Find Mem Obj Info and add CB reference to list container
541static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
542                                                  const char *apiName) {
543    VkBool32 skipCall = VK_FALSE;
544
545    // Skip validation if this image was created through WSI
546    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
547
548        // First update CB binding in MemObj mini CB list
549        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
550        if (pMemInfo) {
551            pMemInfo->commandBufferBindings.insert(cb);
552            // Now update CBInfo's Mem reference list
553            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
554            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
555            if (pCBNode) {
556                pCBNode->memObjs.insert(mem);
557            }
558        }
559    }
560    return skipCall;
561}
562
563// Free bindings related to CB
564static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
565    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
566
567    if (pCBNode) {
568        if (pCBNode->memObjs.size() > 0) {
569            for (auto mem : pCBNode->memObjs) {
570                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
571                if (pInfo) {
572                    pInfo->commandBufferBindings.erase(cb);
573                }
574            }
575            pCBNode->memObjs.clear();
576        }
577        pCBNode->validate_functions.clear();
578    }
579}
580
581// Delete the entire CB list
582static void delete_cmd_buf_info_list(layer_data *my_data) {
583    for (auto &cb_node : my_data->commandBufferMap) {
584        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
585    }
586    my_data->commandBufferMap.clear();
587}
588
589// For given MemObjInfo, report Obj & CB bindings
590static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
591    VkBool32 skipCall = VK_FALSE;
592    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
593    size_t objRefCount = pMemObjInfo->objBindings.size();
594
595    if ((pMemObjInfo->commandBufferBindings.size()) != 0) {
596        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
597                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
598                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
599                           " references",
600                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
601    }
602
603    if (cmdBufRefCount > 0 && pMemObjInfo->commandBufferBindings.size() > 0) {
604        for (auto cb : pMemObjInfo->commandBufferBindings) {
605            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
606                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
607                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
608        }
609        // Clear the list of hanging references
610        pMemObjInfo->commandBufferBindings.clear();
611    }
612
613    if (objRefCount > 0 && pMemObjInfo->objBindings.size() > 0) {
614        for (auto obj : pMemObjInfo->objBindings) {
615            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
616                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
617                    obj.handle, (uint64_t)pMemObjInfo->mem);
618        }
619        // Clear the list of hanging references
620        pMemObjInfo->objBindings.clear();
621    }
622    return skipCall;
623}
624
625static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
626    VkBool32 skipCall = VK_FALSE;
627    auto item = my_data->memObjMap.find(mem);
628    if (item != my_data->memObjMap.end()) {
629        my_data->memObjMap.erase(item);
630    } else {
631        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
632                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
633                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
634    }
635    return skipCall;
636}
637
638// Check if fence for given CB is completed
639static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
640    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
641    VkBool32 skipCall = false;
642    *complete = true;
643
644    if (pCBNode) {
645        if (pCBNode->lastSubmittedQueue != NULL) {
646            VkQueue queue = pCBNode->lastSubmittedQueue;
647            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
648            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
649                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
650                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
651                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
652                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
653                *complete = false;
654            }
655        }
656    }
657    return skipCall;
658}
659
// Tear down the tracking state for a memory object being freed.
// Flags an error when freeing layer-internal ("persistent image") memory
// explicitly, retires completed command-buffer references, reports any
// remaining references, and erases the record. Returns VK_TRUE when the
// call should be skipped.
static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        // allocationSize == 0 marks a record created by the layer itself (no
        // real allocation observed); freeing it externally is an error.
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                bool commandBufferComplete = false;
                skipCall |= checkCBCompleted(dev_data, cb, &commandBufferComplete);
                if (commandBufferComplete) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}
699
700static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
701    switch (type) {
702    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
703        return "image";
704    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
705        return "buffer";
706    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
707        return "swapchain";
708    default:
709        return "unknown";
710    }
711}
712
713// Remove object binding performs 3 tasks:
714// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
715// 2. Clear mem binding for image/buffer by setting its handle to 0
716// TODO : This only applied to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
717static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
718    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
719    VkBool32 skipCall = VK_FALSE;
720    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
721    if (pObjBindInfo) {
722        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
723        // TODO : Make sure this is a reasonable way to reset mem binding
724        pObjBindInfo->mem = VK_NULL_HANDLE;
725        if (pMemObjInfo) {
726            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
727            // and set the objects memory binding pointer to NULL.
728            if (!pMemObjInfo->objBindings.erase({handle, type})) {
729                skipCall |=
730                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
731                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
732                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
733                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
734            }
735        }
736    }
737    return skipCall;
738}
739
740// For NULL mem case, output warning
741// Make sure given object is in global object map
742//  IF a previous binding existed, output validation error
743//  Otherwise, add reference from objectInfo to memoryInfo
744//  Add reference off of objInfo
745//  device is required for error logging, need a dispatchable
746//  object for that.
747static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
748                                VkDebugReportObjectTypeEXT type, const char *apiName) {
749    VkBool32 skipCall = VK_FALSE;
750    // Handle NULL case separately, just clear previous binding & decrement reference
751    if (mem == VK_NULL_HANDLE) {
752        // TODO: Verify against Valid Use section of spec.
753        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
754                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
755    } else {
756        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
757        if (!pObjBindInfo) {
758            skipCall |=
759                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
760                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list",
761                        object_type_to_string(type), apiName, handle);
762        } else {
763            // non-null case so should have real mem obj
764            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
765            if (pMemInfo) {
766                // TODO : Need to track mem binding for obj and report conflict here
767                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
768                if (pPrevBinding != NULL) {
769                    skipCall |=
770                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
771                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
772                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
773                                ") which has already been bound to mem object %#" PRIxLEAST64,
774                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
775                } else {
776                    pMemInfo->objBindings.insert({handle, type});
777                    // For image objects, make sure default memory state is correctly set
778                    // TODO : What's the best/correct way to handle this?
779                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
780                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
781                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
782                            // TODO::  More memory state transition stuff.
783                        }
784                    }
785                    pObjBindInfo->mem = mem;
786                }
787            }
788        }
789    }
790    return skipCall;
791}
792
793// For NULL mem case, clear any previous binding Else...
794// Make sure given object is in its object map
795//  IF a previous binding existed, update binding
796//  Add reference from objectInfo to memoryInfo
797//  Add reference off of object's binding info
798// Return VK_TRUE if addition is successful, VK_FALSE otherwise
799static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
800                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
801    VkBool32 skipCall = VK_FALSE;
802    // Handle NULL case separately, just clear previous binding & decrement reference
803    if (mem == VK_NULL_HANDLE) {
804        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
805    } else {
806        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
807        if (!pObjBindInfo) {
808            skipCall |= log_msg(
809                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
810                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
811        }
812        // non-null case so should have real mem obj
813        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
814        if (pInfo) {
815            pInfo->objBindings.insert({handle, type});
816            // Need to set mem binding for this object
817            pObjBindInfo->mem = mem;
818        }
819    }
820    return skipCall;
821}
822
823template <typename T>
824void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
825                              const char *objectStr) {
826    for (auto const &element : objectName) {
827        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
828                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
829    }
830}
831
832// For given Object, get 'mem' obj that it's bound to or NULL if no binding
833static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
834                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
835    VkBool32 skipCall = VK_FALSE;
836    *mem = VK_NULL_HANDLE;
837    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
838    if (pObjBindInfo) {
839        if (pObjBindInfo->mem) {
840            *mem = pObjBindInfo->mem;
841        } else {
842            skipCall =
843                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
844                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
845        }
846    } else {
847        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
848                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
849                           object_type_to_string(type));
850    }
851    return skipCall;
852}
853
// Print details of MemObjInfo list
// Dumps every tracked memory object (allocation info, object bindings,
// command-buffer bindings) as INFO-level log messages. No-op unless
// VK_DEBUG_REPORT_INFORMATION_BIT_EXT is enabled on the report callbacks.
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        // "Ref Count" here is the sum of CB bindings and object bindings.
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        // allocationSize == 0 marks swapchain-created memory (no app-side alloc info).
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        // Dump every Vulkan object bound to this memory.
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, obj.handle);
            }
        }

        // Dump every command buffer referencing this memory.
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", cb);
            }
        }
    }
}
914
// Dump every tracked command buffer (node address, handle, fenceId, last
// submitted fence) and the memory objects each one references, as INFO-level
// log messages. No-op unless VK_DEBUG_REPORT_INFORMATION_BIT_EXT is enabled.
static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        // Skip the per-CB memory dump when the CB references no memory objects.
        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)obj);
        }
    }
}
947
948#endif
949
950// Return a string representation of CMD_TYPE enum
951static string cmdTypeToString(CMD_TYPE cmd) {
952    switch (cmd) {
953    case CMD_BINDPIPELINE:
954        return "CMD_BINDPIPELINE";
955    case CMD_BINDPIPELINEDELTA:
956        return "CMD_BINDPIPELINEDELTA";
957    case CMD_SETVIEWPORTSTATE:
958        return "CMD_SETVIEWPORTSTATE";
959    case CMD_SETLINEWIDTHSTATE:
960        return "CMD_SETLINEWIDTHSTATE";
961    case CMD_SETDEPTHBIASSTATE:
962        return "CMD_SETDEPTHBIASSTATE";
963    case CMD_SETBLENDSTATE:
964        return "CMD_SETBLENDSTATE";
965    case CMD_SETDEPTHBOUNDSSTATE:
966        return "CMD_SETDEPTHBOUNDSSTATE";
967    case CMD_SETSTENCILREADMASKSTATE:
968        return "CMD_SETSTENCILREADMASKSTATE";
969    case CMD_SETSTENCILWRITEMASKSTATE:
970        return "CMD_SETSTENCILWRITEMASKSTATE";
971    case CMD_SETSTENCILREFERENCESTATE:
972        return "CMD_SETSTENCILREFERENCESTATE";
973    case CMD_BINDDESCRIPTORSETS:
974        return "CMD_BINDDESCRIPTORSETS";
975    case CMD_BINDINDEXBUFFER:
976        return "CMD_BINDINDEXBUFFER";
977    case CMD_BINDVERTEXBUFFER:
978        return "CMD_BINDVERTEXBUFFER";
979    case CMD_DRAW:
980        return "CMD_DRAW";
981    case CMD_DRAWINDEXED:
982        return "CMD_DRAWINDEXED";
983    case CMD_DRAWINDIRECT:
984        return "CMD_DRAWINDIRECT";
985    case CMD_DRAWINDEXEDINDIRECT:
986        return "CMD_DRAWINDEXEDINDIRECT";
987    case CMD_DISPATCH:
988        return "CMD_DISPATCH";
989    case CMD_DISPATCHINDIRECT:
990        return "CMD_DISPATCHINDIRECT";
991    case CMD_COPYBUFFER:
992        return "CMD_COPYBUFFER";
993    case CMD_COPYIMAGE:
994        return "CMD_COPYIMAGE";
995    case CMD_BLITIMAGE:
996        return "CMD_BLITIMAGE";
997    case CMD_COPYBUFFERTOIMAGE:
998        return "CMD_COPYBUFFERTOIMAGE";
999    case CMD_COPYIMAGETOBUFFER:
1000        return "CMD_COPYIMAGETOBUFFER";
1001    case CMD_CLONEIMAGEDATA:
1002        return "CMD_CLONEIMAGEDATA";
1003    case CMD_UPDATEBUFFER:
1004        return "CMD_UPDATEBUFFER";
1005    case CMD_FILLBUFFER:
1006        return "CMD_FILLBUFFER";
1007    case CMD_CLEARCOLORIMAGE:
1008        return "CMD_CLEARCOLORIMAGE";
1009    case CMD_CLEARATTACHMENTS:
1010        return "CMD_CLEARCOLORATTACHMENT";
1011    case CMD_CLEARDEPTHSTENCILIMAGE:
1012        return "CMD_CLEARDEPTHSTENCILIMAGE";
1013    case CMD_RESOLVEIMAGE:
1014        return "CMD_RESOLVEIMAGE";
1015    case CMD_SETEVENT:
1016        return "CMD_SETEVENT";
1017    case CMD_RESETEVENT:
1018        return "CMD_RESETEVENT";
1019    case CMD_WAITEVENTS:
1020        return "CMD_WAITEVENTS";
1021    case CMD_PIPELINEBARRIER:
1022        return "CMD_PIPELINEBARRIER";
1023    case CMD_BEGINQUERY:
1024        return "CMD_BEGINQUERY";
1025    case CMD_ENDQUERY:
1026        return "CMD_ENDQUERY";
1027    case CMD_RESETQUERYPOOL:
1028        return "CMD_RESETQUERYPOOL";
1029    case CMD_COPYQUERYPOOLRESULTS:
1030        return "CMD_COPYQUERYPOOLRESULTS";
1031    case CMD_WRITETIMESTAMP:
1032        return "CMD_WRITETIMESTAMP";
1033    case CMD_INITATOMICCOUNTERS:
1034        return "CMD_INITATOMICCOUNTERS";
1035    case CMD_LOADATOMICCOUNTERS:
1036        return "CMD_LOADATOMICCOUNTERS";
1037    case CMD_SAVEATOMICCOUNTERS:
1038        return "CMD_SAVEATOMICCOUNTERS";
1039    case CMD_BEGINRENDERPASS:
1040        return "CMD_BEGINRENDERPASS";
1041    case CMD_ENDRENDERPASS:
1042        return "CMD_ENDRENDERPASS";
1043    default:
1044        return "UNKNOWN";
1045    }
1046}
1047
1048// SPIRV utility functions
1049static void build_def_index(shader_module *module) {
1050    for (auto insn : *module) {
1051        switch (insn.opcode()) {
1052        /* Types */
1053        case spv::OpTypeVoid:
1054        case spv::OpTypeBool:
1055        case spv::OpTypeInt:
1056        case spv::OpTypeFloat:
1057        case spv::OpTypeVector:
1058        case spv::OpTypeMatrix:
1059        case spv::OpTypeImage:
1060        case spv::OpTypeSampler:
1061        case spv::OpTypeSampledImage:
1062        case spv::OpTypeArray:
1063        case spv::OpTypeRuntimeArray:
1064        case spv::OpTypeStruct:
1065        case spv::OpTypeOpaque:
1066        case spv::OpTypePointer:
1067        case spv::OpTypeFunction:
1068        case spv::OpTypeEvent:
1069        case spv::OpTypeDeviceEvent:
1070        case spv::OpTypeReserveId:
1071        case spv::OpTypeQueue:
1072        case spv::OpTypePipe:
1073            module->def_index[insn.word(1)] = insn.offset();
1074            break;
1075
1076        /* Fixed constants */
1077        case spv::OpConstantTrue:
1078        case spv::OpConstantFalse:
1079        case spv::OpConstant:
1080        case spv::OpConstantComposite:
1081        case spv::OpConstantSampler:
1082        case spv::OpConstantNull:
1083            module->def_index[insn.word(2)] = insn.offset();
1084            break;
1085
1086        /* Specialization constants */
1087        case spv::OpSpecConstantTrue:
1088        case spv::OpSpecConstantFalse:
1089        case spv::OpSpecConstant:
1090        case spv::OpSpecConstantComposite:
1091        case spv::OpSpecConstantOp:
1092            module->def_index[insn.word(2)] = insn.offset();
1093            break;
1094
1095        /* Variables */
1096        case spv::OpVariable:
1097            module->def_index[insn.word(2)] = insn.offset();
1098            break;
1099
1100        /* Functions */
1101        case spv::OpFunction:
1102            module->def_index[insn.word(2)] = insn.offset();
1103            break;
1104
1105        default:
1106            /* We don't care about any other defs for now. */
1107            break;
1108        }
1109    }
1110}
1111
1112static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1113    for (auto insn : *src) {
1114        if (insn.opcode() == spv::OpEntryPoint) {
1115            auto entrypointName = (char const *)&insn.word(3);
1116            auto entrypointStageBits = 1u << insn.word(1);
1117
1118            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1119                return insn;
1120            }
1121        }
1122    }
1123
1124    return src->end();
1125}
1126
1127bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
1128    uint32_t *words = (uint32_t *)pCreateInfo->pCode;
1129    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
1130
1131    /* Just validate that the header makes sense. */
1132    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
1133}
1134
1135static char const *storage_class_name(unsigned sc) {
1136    switch (sc) {
1137    case spv::StorageClassInput:
1138        return "input";
1139    case spv::StorageClassOutput:
1140        return "output";
1141    case spv::StorageClassUniformConstant:
1142        return "const uniform";
1143    case spv::StorageClassUniform:
1144        return "uniform";
1145    case spv::StorageClassWorkgroup:
1146        return "workgroup local";
1147    case spv::StorageClassCrossWorkgroup:
1148        return "workgroup global";
1149    case spv::StorageClassPrivate:
1150        return "private global";
1151    case spv::StorageClassFunction:
1152        return "function";
1153    case spv::StorageClassGeneric:
1154        return "generic";
1155    case spv::StorageClassAtomicCounter:
1156        return "atomic counter";
1157    case spv::StorageClassImage:
1158        return "image";
1159    case spv::StorageClassPushConstant:
1160        return "push constant";
1161    default:
1162        return "unknown";
1163    }
1164}
1165
1166/* get the value of an integral constant */
1167unsigned get_constant_value(shader_module const *src, unsigned id) {
1168    auto value = src->get_def(id);
1169    assert(value != src->end());
1170
1171    if (value.opcode() != spv::OpConstant) {
1172        /* TODO: Either ensure that the specialization transform is already performed on a module we're
1173            considering here, OR -- specialize on the fly now.
1174            */
1175        return 1;
1176    }
1177
1178    return value.word(3);
1179}
1180
1181
1182static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1183    auto insn = src->get_def(type);
1184    assert(insn != src->end());
1185
1186    switch (insn.opcode()) {
1187    case spv::OpTypeBool:
1188        ss << "bool";
1189        break;
1190    case spv::OpTypeInt:
1191        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1192        break;
1193    case spv::OpTypeFloat:
1194        ss << "float" << insn.word(2);
1195        break;
1196    case spv::OpTypeVector:
1197        ss << "vec" << insn.word(3) << " of ";
1198        describe_type_inner(ss, src, insn.word(2));
1199        break;
1200    case spv::OpTypeMatrix:
1201        ss << "mat" << insn.word(3) << " of ";
1202        describe_type_inner(ss, src, insn.word(2));
1203        break;
1204    case spv::OpTypeArray:
1205        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1206        describe_type_inner(ss, src, insn.word(2));
1207        break;
1208    case spv::OpTypePointer:
1209        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1210        describe_type_inner(ss, src, insn.word(3));
1211        break;
1212    case spv::OpTypeStruct: {
1213        ss << "struct of (";
1214        for (unsigned i = 2; i < insn.len(); i++) {
1215            describe_type_inner(ss, src, insn.word(i));
1216            if (i == insn.len() - 1) {
1217                ss << ")";
1218            } else {
1219                ss << ", ";
1220            }
1221        }
1222        break;
1223    }
1224    case spv::OpTypeSampler:
1225        ss << "sampler";
1226        break;
1227    case spv::OpTypeSampledImage:
1228        ss << "sampler+";
1229        describe_type_inner(ss, src, insn.word(2));
1230        break;
1231    case spv::OpTypeImage:
1232        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1233        break;
1234    default:
1235        ss << "oddtype";
1236        break;
1237    }
1238}
1239
1240
1241static std::string describe_type(shader_module const *src, unsigned type) {
1242    std::ostringstream ss;
1243    describe_type_inner(ss, src, type);
1244    return ss.str();
1245}
1246
1247
/* Walk two SPIR-V type trees in tandem and decide whether they are
 * compatible for interface matching. a_arrayed / b_arrayed mean the
 * corresponding side is expected to carry one extra outer level of
 * arrayness (e.g. per-vertex arrays on tess/geometry interfaces), which is
 * peeled off before comparing. Pointers match on pointee only; storage
 * class is allowed to differ. */
static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        /* peel the expected extra array level off a's side and recurse */
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we havent resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
    case spv::OpTypeMatrix:
        /* match on element type, count. these all have the same layout. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
1319
/* Look `id` up in `map`; return the mapped value, or `def` when absent. */
static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto const found = map.find(id);
    if (found != map.end())
        return found->second;
    return def;
}
1327
/* Number of interface "location" slots consumed by the given type.
 * strip_array_level peels one outer array level before counting (used for
 * per-vertex arrays on tess/geometry interfaces). Matrices consume
 * column-count * element locations; all remaining types are counted as a
 * single location. */
static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            /* word 3 is the id of a constant holding the element count */
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
1354
/* (location, component) pair addressing an interface slot -- see the slot
 * maps built by the interface-collection helpers below. */
typedef std::pair<unsigned, unsigned> location_t;
/* (descriptor set, binding) pair addressing a descriptor slot. */
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

/* One variable on a shader stage interface. */
struct interface_var {
    uint32_t id;      /* SPIR-V result id of the variable */
    uint32_t type_id; /* SPIR-V type id (for block members, the member's type) */
    uint32_t offset;  /* location offset within a multi-location variable */
    bool is_patch;    /* NOTE(review): presumably set from a Patch decoration by the collection code -- confirm at the call sites */
    /* TODO: collect the name, too? Isn't required to be present. */
};
1365
1366static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1367    while (true) {
1368
1369        if (def.opcode() == spv::OpTypePointer) {
1370            def = src->get_def(def.word(3));
1371        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1372            def = src->get_def(def.word(2));
1373            is_array_of_verts = false;
1374        } else if (def.opcode() == spv::OpTypeStruct) {
1375            return def;
1376        } else {
1377            return src->end();
1378        }
1379    }
1380}
1381
/* Expand the members of the interface block backing variable `id` into
 * individual (location, component) entries in `out`. `blocks` holds the type
 * ids decorated with DecorationBlock; if `type_id` does not resolve to one of
 * those structs, this does nothing. */
static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    /* member index -> DecorationComponent value, so the second pass can pair
     * each member's Location with its Component. */
    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index); /* struct member types start at word 2 of OpTypeStruct */

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                /* component defaults to 0 when the member has no Component decoration */
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                /* emit one interface_var per location the member spans */
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
1432
/* Collect every variable of storage class `sinterface` appearing in the
 * entrypoint's interface, keyed by (location, component), into `out`.
 * Interface-block instances (no Location of their own) are expanded
 * member-by-member via collect_interface_block_members. */
static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
                                          bool is_array_of_verts) {
    /* id -> decoration value, gathered in a single pass over all OpDecorate. */
    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;

    for (auto insn : *src) {

        /* We consider two interface models: SSO rendezvous-by-location, and
         * builtins. Complain about anything that fits neither model.
         */
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }
        }
    }

    /* TODO: handle grouped decorations */
    /* TODO: handle index=1 dual source outputs from FS -- two vars will
     * have the same location, and we DON'T want to clobber. */

    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
       terminator, to fill out the rest of the word - so we only need to look at the last byte in
       the word to determine which word contains the terminator. */
    auto word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    /* remaining words of OpEntryPoint are the ids of the interface variables */
    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == sinterface) { /* word 3 of OpVariable is the storage class */
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
            bool is_patch = var_patch.find(id) != var_patch.end();

            /* All variables and interface block members in the Input or Output storage classes
             * must be decorated with either a builtin or an explicit location.
             *
             * TODO: integrate the interface block support here. For now, don't complain --
             * a valid SPIRV module will only hit this path for the interface block case, as the
             * individual members of the type are decorated, rather than variable declarations.
             */

            if (location != -1) {
                /* A user-defined interface variable, with a location. Where a variable
                 * occupied multiple locations, emit one result for each. */
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                /* An interface block instance */
                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }
}
1524
1525static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
1526                                                 std::unordered_set<uint32_t> const &accessible_ids,
1527                                                 std::map<descriptor_slot_t, interface_var> &out) {
1528
1529    std::unordered_map<unsigned, unsigned> var_sets;
1530    std::unordered_map<unsigned, unsigned> var_bindings;
1531
1532    for (auto insn : *src) {
1533        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1534         * DecorationDescriptorSet and DecorationBinding.
1535         */
1536        if (insn.opcode() == spv::OpDecorate) {
1537            if (insn.word(2) == spv::DecorationDescriptorSet) {
1538                var_sets[insn.word(1)] = insn.word(3);
1539            }
1540
1541            if (insn.word(2) == spv::DecorationBinding) {
1542                var_bindings[insn.word(1)] = insn.word(3);
1543            }
1544        }
1545    }
1546
1547    for (auto id : accessible_ids) {
1548        auto insn = src->get_def(id);
1549        assert(insn != src->end());
1550
1551        if (insn.opcode() == spv::OpVariable &&
1552            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1553            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1554            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1555
1556            auto existing_it = out.find(std::make_pair(set, binding));
1557            if (existing_it != out.end()) {
1558                /* conflict within spv image */
1559                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1560                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1561                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1562                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1563                        existing_it->first.second);
1564            }
1565
1566            interface_var v;
1567            v.id = insn.word(2);
1568            v.type_id = insn.word(1);
1569            v.offset = 0;
1570            v.is_patch = false;
1571            out[std::make_pair(set, binding)] = v;
1572        }
1573    }
1574}
1575
/* Validate the interface between two adjacent pipeline stages: every producer
 * output should be consumed, every consumer input must be produced, and the
 * types at each shared (location, component) must match.
 * Returns true if validation passes (unconsumed outputs only warn). */
static bool validate_interface_between_stages(layer_data *my_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              char const *consumer_name, bool consumer_arrayed_input) {
    std::map<location_t, interface_var> outputs;
    std::map<location_t, interface_var> inputs;

    bool pass = true;

    collect_interface_by_location(my_data, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
    collect_interface_by_location(my_data, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
                                  consumer_arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    /* maps sorted by key (location); walk them together to find mismatches */
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            /* output with no matching input: performance warning only */
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
                        a_first.second, consumer_name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            /* input with no matching output: error */
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
                        producer_name)) {
                pass = false;
            }
            b_it++;
        } else {
            /* locations line up; verify the two stages agree on the type */
            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, false, consumer_arrayed_input)) {
                /* OK! */
            } else {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}
1634
/* Coarse classification shared between VkFormats (get_format_type) and SPIR-V
 * types (get_fundamental_type), for attribute/attachment type checking. */
enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};
1641
/* Classify a VkFormat into a FORMAT_TYPE_* bucket. Only the explicitly listed
 * formats are integer; anything else (besides VK_FORMAT_UNDEFINED) is treated
 * as float, per the note on FORMAT_TYPE_FLOAT. */
static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}
1684
1685/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1686 * for comparison to a VkFormat's characterization above. */
1687static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1688    auto insn = src->get_def(type);
1689    assert(insn != src->end());
1690
1691    switch (insn.opcode()) {
1692    case spv::OpTypeInt:
1693        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1694    case spv::OpTypeFloat:
1695        return FORMAT_TYPE_FLOAT;
1696    case spv::OpTypeVector:
1697        return get_fundamental_type(src, insn.word(2));
1698    case spv::OpTypeMatrix:
1699        return get_fundamental_type(src, insn.word(2));
1700    case spv::OpTypeArray:
1701        return get_fundamental_type(src, insn.word(2));
1702    case spv::OpTypePointer:
1703        return get_fundamental_type(src, insn.word(3));
1704    default:
1705        return FORMAT_TYPE_UNDEFINED;
1706    }
1707}
1708
1709static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1710    uint32_t bit_pos = u_ffs(stage);
1711    return bit_pos - 1;
1712}
1713
1714static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1715    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1716     * each binding should be specified only once.
1717     */
1718    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1719    bool pass = true;
1720
1721    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1722        auto desc = &vi->pVertexBindingDescriptions[i];
1723        auto &binding = bindings[desc->binding];
1724        if (binding) {
1725            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1726                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1727                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1728                pass = false;
1729            }
1730        } else {
1731            binding = desc;
1732        }
1733    }
1734
1735    return pass;
1736}
1737
1738static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi,
1739                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1740    std::map<location_t, interface_var> inputs;
1741    bool pass = true;
1742
1743    collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false);
1744
1745    /* Build index by location */
1746    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1747    if (vi) {
1748        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
1749            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
1750    }
1751
1752    auto it_a = attribs.begin();
1753    auto it_b = inputs.begin();
1754
1755    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1756        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1757        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1758        auto a_first = a_at_end ? 0 : it_a->first;
1759        auto b_first = b_at_end ? 0 : it_b->first.first;
1760        if (!a_at_end && (b_at_end || a_first < b_first)) {
1761            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1762                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1763                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1764                pass = false;
1765            }
1766            it_a++;
1767        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1768            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1769                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d but not provided",
1770                        b_first)) {
1771                pass = false;
1772            }
1773            it_b++;
1774        } else {
1775            unsigned attrib_type = get_format_type(it_a->second->format);
1776            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1777
1778            /* type checking */
1779            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1780                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1781                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1782                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1783                            string_VkFormat(it_a->second->format), a_first,
1784                            describe_type(vs, it_b->second.type_id).c_str())) {
1785                    pass = false;
1786                }
1787            }
1788
1789            /* OK! */
1790            it_a++;
1791            it_b++;
1792        }
1793    }
1794
1795    return pass;
1796}
1797
/* Validate the fragment shader's outputs against the color attachments of the
 * given subpass: warn on outputs with no attachment, error on attachments not
 * written, and error on fundamental type mismatches.
 * Returns true if validation passes. */
static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
    std::map<location_t, interface_var> outputs;
    bool pass = true;

    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */

    collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false);

    auto it = outputs.begin();
    uint32_t attachment = 0;

    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
     * are currently dense, but the parallel with matching between shader stages is nice.
     */

    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
            /* FS output with no attachment at that location: warning only */
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "FS writes to output location %d with no matching attachment", it->first.first)) {
                pass = false;
            }
            it++;
        } else if (it == outputs.end() || it->first.first > attachment) {
            /* attachment with no FS output writing it: error */
            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
                pass = false;
            }
            attachment++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
            unsigned att_type = get_format_type(color_formats[attachment]);

            /* type checking */
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
                            string_VkFormat(color_formats[attachment]),
                            describe_type(fs, it->second.type_id).c_str())) {
                    pass = false;
                }
            }

            /* OK! */
            it++;
            attachment++;
        }
    }

    return pass;
}
1852
1853/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1854 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1855 * for example.
1856 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1857 *  - NOT the shader input/output interfaces.
1858 *
1859 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1860 * converting parts of this to be generated from the machine-readable spec instead.
1861 */
static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
    /* Worklist-based reachability walk; seeded with the entrypoint's function
     * id (word 2 of OpEntryPoint). Results accumulate in `ids`. */
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
             * across all kinds of things here that we may not care about. */
            continue;
        }

        /* try to add to the output set */
        if (!ids.insert(id).second) {
            continue; /* if we already saw this id, we don't want to walk it again. */
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            /* scan whole body of the function, enlisting anything interesting */
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                /* loads and atomic read-modify-write ops carry their pointer in word 3 */
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); /* ptr */
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); /* ptr */
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); /* base ptr */
                    break;
                /* image/sampler ops carry their image or sampled-image operand in word 3 */
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); /* image or sampled image */
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
                    break;
                case spv::OpFunctionCall:
                    for (auto i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* fn itself, and all args */
                    }
                    break;

                case spv::OpExtInst:
                    for (auto i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); /* operands to ext inst */
                    }
                    break;
                }
            }
            break;
        }
    }
}
1969
/* Per-stage attributes consulted during interface validation. */
struct shader_stage_attributes {
    char const *const name; /* human-readable stage name for messages */
    bool arrayed_input;     /* stage's inputs are arrayed (per-vertex), per the table below */
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false},
    {"tessellation control shader", true},
    {"tessellation evaluation shader", false},
    {"geometry shader", true},
    {"fragment shader", false},
};
1982
/* Check that every Offset-decorated member of the push-constant block `type`
 * lies inside some push-constant range of the pipeline layout, and that the
 * covering range is visible to `stage`. Returns true if validation passes. */
static bool validate_push_constant_block_against_pipeline(layer_data *my_data,
                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    /* strip off ptrs etc */
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    /* validate directly off the offsets. this isn't quite correct for arrays
     * and matrices, but is a good first step. TODO: arrays, matrices, weird
     * sizes */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {

            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; /* bytes; TODO: calculate this based on the type */

                bool found_range = false;
                for (auto const &range : *pushConstantRanges) {
                    /* does [offset, offset+size) fit entirely within this range? */
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        /* member covered, but the range must also be visible to this stage */
                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}
2037
2038static bool validate_push_constant_usage(layer_data *my_data,
2039                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
2040                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2041    bool pass = true;
2042
2043    for (auto id : accessible_ids) {
2044        auto def_insn = src->get_def(id);
2045        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2046            pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src,
2047                                                                 src->get_def(def_insn.word(1)), stage);
2048        }
2049    }
2050
2051    return pass;
2052}
2053
2054// For given pipelineLayout verify that the setLayout at slot.first
2055//  has the requested binding at slot.second
2056static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) {
2057
2058    if (!pipelineLayout)
2059        return nullptr;
2060
2061    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
2062        return nullptr;
2063
2064    auto const layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]];
2065
2066    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
2067    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
2068        return nullptr;
2069
2070    assert(bindingIt->second < layout_node->createInfo.bindingCount);
2071    return &layout_node->createInfo.pBindings[bindingIt->second];
2072}
2073
// Block of code at start here for managing/tracking Pipeline state that this layer cares about

// Global draw counters, one per draw type (NUM_DRAW_TYPES entries) -- presumably
// indexed by the same draw-type enum as GLOBAL_CB_NODE::drawCount; confirm at use sites.
static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2077
2078// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2079//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2080//   to that same cmd buffer by separate thread are not changing state from underneath us
2081// Track the last cmd buffer touched by this thread
2082
2083static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2084    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2085        if (pCB->drawCount[i])
2086            return VK_TRUE;
2087    }
2088    return VK_FALSE;
2089}
2090
2091// Check object status for selected flag state
2092static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2093                                DRAW_STATE_ERROR error_code, const char *fail_msg) {
2094    if (!(pNode->status & status_mask)) {
2095        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2096                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2097                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2098    }
2099    return VK_FALSE;
2100}
2101
2102// Retrieve pipeline node ptr for given pipeline object
2103static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
2104    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
2105        return NULL;
2106    }
2107    return my_data->pipelineMap[pipeline];
2108}
2109
2110// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
2111static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2112    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2113        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2114            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2115                return VK_TRUE;
2116        }
2117    }
2118    return VK_FALSE;
2119}
2120
2121// Validate state stored as flags at time of draw call
2122static VkBool32 validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe,
2123                                          VkBool32 indexedDraw) {
2124    VkBool32 result;
2125    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2126                             "Dynamic viewport state not set for this command buffer");
2127    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2128                              "Dynamic scissor state not set for this command buffer");
2129    if ((pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2130        (pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)) {
2131        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2132                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2133    }
2134    if (pPipe->rsStateCI.depthBiasEnable) {
2135        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2136                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2137    }
2138    if (pPipe->blendConstantsEnabled) {
2139        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2140                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2141    }
2142    if (pPipe->dsStateCI.depthBoundsTestEnable) {
2143        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2144                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2145    }
2146    if (pPipe->dsStateCI.stencilTestEnable) {
2147        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2148                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2149        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2150                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2151        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2152                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2153    }
2154    if (indexedDraw) {
2155        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2156                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2157                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2158    }
2159    return result;
2160}
2161
2162// Verify attachment reference compatibility according to spec
2163//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array much match this
2164//  If both AttachmentReference arrays have requested index, check their corresponding AttachementDescriptions
2165//   to make sure that format and samples counts match.
2166//  If not, they are not compatible.
2167static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2168                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2169                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2170                                             const VkAttachmentDescription *pSecondaryAttachments) {
2171    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2172        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2173            return true;
2174    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2175        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2176            return true;
2177    } else { // format and sample count must match
2178        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2179             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2180            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2181             pSecondaryAttachments[pSecondary[index].attachment].samples))
2182            return true;
2183    }
2184    // Format and sample counts didn't match
2185    return false;
2186}
2187
2188// For give primary and secondary RenderPass objects, verify that they're compatible
2189static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2190                                            string &errorMsg) {
2191    stringstream errorStr;
2192    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2193        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2194        errorMsg = errorStr.str();
2195        return false;
2196    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2197        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2198        errorMsg = errorStr.str();
2199        return false;
2200    }
2201    // Trivial pass case is exact same RP
2202    if (primaryRP == secondaryRP) {
2203        return true;
2204    }
2205    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2206    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2207    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2208        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2209                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2210        errorMsg = errorStr.str();
2211        return false;
2212    }
2213    uint32_t spIndex = 0;
2214    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2215        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2216        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2217        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2218        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2219        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2220            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2221                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2222                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2223                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2224                errorMsg = errorStr.str();
2225                return false;
2226            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2227                                                         primaryColorCount, primaryRPCI->pAttachments,
2228                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2229                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2230                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2231                errorMsg = errorStr.str();
2232                return false;
2233            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2234                                                         primaryColorCount, primaryRPCI->pAttachments,
2235                                                         secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2236                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2237                errorStr << "depth/stencil attachments at index " << cIdx << " of subpass index " << spIndex
2238                         << " are not compatible.";
2239                errorMsg = errorStr.str();
2240                return false;
2241            }
2242        }
2243        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2244        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2245        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2246        for (uint32_t i = 0; i < inputMax; ++i) {
2247            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryColorCount,
2248                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2249                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2250                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2251                errorMsg = errorStr.str();
2252                return false;
2253            }
2254        }
2255    }
2256    return true;
2257}
2258
2259// For give SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
2260static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2261                                            const uint32_t layoutIndex, string &errorMsg) {
2262    stringstream errorStr;
2263    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2264    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2265        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2266        errorMsg = errorStr.str();
2267        return false;
2268    }
2269    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2270        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2271                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2272                 << ", but you're attempting to bind set to index " << layoutIndex;
2273        errorMsg = errorStr.str();
2274        return false;
2275    }
2276    // Get the specific setLayout from PipelineLayout that overlaps this set
2277    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2278    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
2279        return true;
2280    }
2281    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
2282    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
2283        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
2284                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
2285                 << " descriptors.";
2286        errorMsg = errorStr.str();
2287        return false; // trivial fail case
2288    }
2289    // Now need to check set against corresponding pipelineLayout to verify compatibility
2290    for (size_t i = 0; i < descriptorCount; ++i) {
2291        // Need to verify that layouts are identically defined
2292        //  TODO : Is below sufficient? Making sure that types & stageFlags match per descriptor
2293        //    do we also need to check immutable samplers?
2294        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
2295            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
2296                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
2297                     << "' but corresponding descriptor from pipelineLayout is type '"
2298                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
2299            errorMsg = errorStr.str();
2300            return false;
2301        }
2302        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
2303            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
2304                     << "' but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
2305            errorMsg = errorStr.str();
2306            return false;
2307        }
2308    }
2309    return true;
2310}
2311
2312// Validate that data for each specialization entry is fully contained within the buffer.
2313static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2314    VkBool32 pass = VK_TRUE;
2315
2316    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2317
2318    if (spec) {
2319        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2320            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2321                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2322                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2323                            "Specialization entry %u (for constant id %u) references memory outside provided "
2324                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2325                            " bytes provided)",
2326                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2327                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2328
2329                    pass = VK_FALSE;
2330                }
2331            }
2332        }
2333    }
2334
2335    return pass;
2336}
2337
/* Determine whether the SPIR-V type `type_id` in `module` is compatible with the
 * given VkDescriptorType, writing into descriptor_count how many descriptors the
 * binding consumes (product of all array dimensions wrapped around the type).
 * Word offsets below follow the SPIR-V instruction layouts:
 *   OpTypeArray:   word(2) = element type id, word(3) = length constant id
 *   OpTypePointer: word(3) = pointee type id
 *   OpTypeImage:   word(3) = Dim, word(7) = Sampled
 * Returns false for any type that cannot back the requested descriptor. */
static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or ptrs. Where we remove array levels, adjust the
     * descriptor count for each dimension. */
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        }
        else {
            type = module->get_def(type.word(3));
        }
    }

    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        /* Structs are UBOs or SSBOs depending on whether they were decorated
         * Block or BufferBlock; scan the module's decorations for this type id. */
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid: a descriptor-backed struct with neither decoration. */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;

    case spv::OpTypeSampledImage:
        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            /* Slight relaxation for some GLSL historical madness: samplerBuffer
             * doesn't really have a sampler, and a texel buffer descriptor
             * doesn't really provide one. Allow this slight mismatch.
             */
            auto image_type = module->get_def(type.word(2));
            auto dim = image_type.word(3);
            auto sampled = image_type.word(7);
            return dim == spv::DimBuffer && sampled == 1;
        }
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Many descriptor types backing image types-- depends on dimension
         * and whether the image will be used with a sampler. SPIRV for
         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
         * runtime is unacceptable.
         */
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}
2421
2422static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2423    if (!feature) {
2424        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2425                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2426                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2427                    "enabled on the device",
2428                    feature_name)) {
2429            return false;
2430        }
2431    }
2432
2433    return true;
2434}
2435
/* Check each OpCapability declared by the shader module against the
 * VkPhysicalDeviceFeatures the application enabled at device creation.
 * Each capability that maps to an optional feature goes through
 * require_feature(); capabilities with no Vulkan mapping produce
 * SHADER_CHECKER_BAD_CAPABILITY. Returns VK_FALSE if any logged message
 * requested that validation fail. */
static VkBool32 validate_shader_capabilities(layer_data *my_data, shader_module const *src)
{
    VkBool32 pass = VK_TRUE;

    auto enabledFeatures = &my_data->physDevProperties.features;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            // word(1) is the Capability operand of OpCapability.
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            // Any capability not handled above has no Vulkan core mapping.
            default:
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, not supported in Vulkan.",
                            insn.word(1)))
                    pass = VK_FALSE;
                break;
            }
        }
    }

    return pass;
}
2575
2576
2577
2578static VkBool32 validate_pipeline_shader_stage(layer_data *dev_data,
2579                                               VkPipelineShaderStageCreateInfo const *pStage,
2580                                               PIPELINE_NODE *pipeline,
2581                                               PIPELINE_LAYOUT_NODE *pipelineLayout,
2582                                               shader_module **out_module,
2583                                               spirv_inst_iter *out_entrypoint)
2584{
2585    VkBool32 pass = VK_TRUE;
2586    auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get();
2587    pass &= validate_specialization_offsets(dev_data, pStage);
2588
2589    /* find the entrypoint */
2590    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2591    if (entrypoint == module->end()) {
2592        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2593                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2594                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2595                    string_VkShaderStageFlagBits(pStage->stage))) {
2596            pass = VK_FALSE;
2597        }
2598    }
2599
2600    /* validate shader capabilities against enabled device features */
2601    pass &= validate_shader_capabilities(dev_data, module);
2602
2603    /* mark accessible ids */
2604    std::unordered_set<uint32_t> accessible_ids;
2605    mark_accessible_ids(module, entrypoint, accessible_ids);
2606
2607    /* validate descriptor set layout against what the entrypoint actually uses */
2608    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2609    collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses);
2610
2611    /* validate push constant usage */
2612    pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges,
2613                                        module, accessible_ids, pStage->stage);
2614
2615    /* validate descriptor use */
2616    for (auto use : descriptor_uses) {
2617        // While validating shaders capture which slots are used by the pipeline
2618        pipeline->active_slots[use.first.first].insert(use.first.second);
2619
2620        /* find the matching binding */
2621        auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first);
2622        unsigned required_descriptor_count;
2623
2624        if (!binding) {
2625            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2626                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2627                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2628                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2629                pass = VK_FALSE;
2630            }
2631        } else if (~binding->stageFlags & pStage->stage) {
2632            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2633                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2634                        "Shader uses descriptor slot %u.%u (used "
2635                        "as type `%s`) but descriptor not "
2636                        "accessible from stage %s",
2637                        use.first.first, use.first.second,
2638                        describe_type(module, use.second.type_id).c_str(),
2639                        string_VkShaderStageFlagBits(pStage->stage))) {
2640                pass = VK_FALSE;
2641            }
2642        } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) {
2643            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2644                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2645                        "Type mismatch on descriptor slot "
2646                        "%u.%u (used as type `%s`) but "
2647                        "descriptor of type %s",
2648                        use.first.first, use.first.second,
2649                        describe_type(module, use.second.type_id).c_str(),
2650                        string_VkDescriptorType(binding->descriptorType))) {
2651                pass = VK_FALSE;
2652            }
2653        } else if (binding->descriptorCount < required_descriptor_count) {
2654            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2655                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2656                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2657                        required_descriptor_count, use.first.first, use.first.second,
2658                        describe_type(module, use.second.type_id).c_str(),
2659                        binding->descriptorCount)) {
2660                pass = VK_FALSE;
2661            }
2662        }
2663    }
2664
2665    return pass;
2666}
2667
2668
// Validate the shaders used by the given pipeline, and store the active_slots
//  that are actually used by the pipeline into pPipeline->active_slots
2671static VkBool32 validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2672    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
2673    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2674    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2675
2676    shader_module *shaders[5];
2677    memset(shaders, 0, sizeof(shaders));
2678    spirv_inst_iter entrypoints[5];
2679    memset(entrypoints, 0, sizeof(entrypoints));
2680    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2681    VkBool32 pass = VK_TRUE;
2682
2683    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2684
2685    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2686        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
2687        auto stage_id = get_shader_stage_id(pStage->stage);
2688        pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout,
2689                                               &shaders[stage_id], &entrypoints[stage_id]);
2690    }
2691
2692    vi = pCreateInfo->pVertexInputState;
2693
2694    if (vi) {
2695        pass &= validate_vi_consistency(my_data, vi);
2696    }
2697
2698    if (shaders[vertex_stage]) {
2699        pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2700    }
2701
2702    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2703    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2704
2705    while (!shaders[producer] && producer != fragment_stage) {
2706        producer++;
2707        consumer++;
2708    }
2709
2710    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2711        assert(shaders[producer]);
2712        if (shaders[consumer]) {
2713            pass &= validate_interface_between_stages(my_data, shaders[producer], entrypoints[producer],
2714                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
2715                                                     shader_stage_attribs[consumer].name,
2716                                                     shader_stage_attribs[consumer].arrayed_input);
2717
2718            producer = consumer;
2719        }
2720    }
2721
2722    auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr;
2723
2724    if (shaders[fragment_stage] && rp) {
2725        pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp,
2726                                                       pCreateInfo->subpass);
2727    }
2728
2729    return pass;
2730}
2731
2732static VkBool32 validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2733    VkComputePipelineCreateInfo const *pCreateInfo = &pPipeline->computePipelineCI;
2734
2735    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2736
2737    shader_module *module;
2738    spirv_inst_iter entrypoint;
2739
2740    return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout,
2741                                          &module, &entrypoint);
2742}
2743
2744// Return Set node ptr for specified set or else NULL
2745static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2746    if (my_data->setMap.find(set) == my_data->setMap.end()) {
2747        return NULL;
2748    }
2749    return my_data->setMap[set];
2750}
2751
2752// For given Layout Node and binding, return index where that binding begins
2753static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2754    uint32_t offsetIndex = 0;
2755    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2756        if (pLayout->createInfo.pBindings[i].binding == binding)
2757            break;
2758        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2759    }
2760    return offsetIndex;
2761}
2762
2763// For given layout node and binding, return last index that is updated
2764static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2765    uint32_t offsetIndex = 0;
2766    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2767        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2768        if (pLayout->createInfo.pBindings[i].binding == binding)
2769            break;
2770    }
2771    return offsetIndex - 1;
2772}
2773
2774// For the given command buffer, verify and update the state for activeSetBindingsPairs
2775//  This includes:
2776//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2777//     To be valid, the dynamic offset combined with the offset and range from its
2778//     descriptor update must not overflow the size of its buffer being updated
2779//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2780//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2781static VkBool32 validate_and_update_drawtime_descriptor_state(
2782    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2783    const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
2784    VkBool32 result = VK_FALSE;
2785
2786    VkWriteDescriptorSet *pWDS = NULL;
2787    uint32_t dynOffsetIndex = 0;
2788    VkDeviceSize bufferSize = 0;
2789    for (auto set_bindings_pair : activeSetBindingsPairs) {
2790        SET_NODE *set_node = set_bindings_pair.first;
2791        LAYOUT_NODE *layout_node = set_node->pLayout;
2792        for (auto binding : set_bindings_pair.second) {
2793            uint32_t startIdx = getBindingStartIndex(layout_node, binding);
2794            uint32_t endIdx = getBindingEndIndex(layout_node, binding);
2795            for (uint32_t i = startIdx; i <= endIdx; ++i) {
2796                // TODO : Flag error here if set_node->pDescriptorUpdates[i] is NULL
2797                switch (set_node->pDescriptorUpdates[i]->sType) {
2798                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
2799                    pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];
2800                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2801                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2802                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2803                            bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
2804                            uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
2805                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
2806                                if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
2807                                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2808                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2809                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2810                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2811                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of "
2812                                                      "VK_WHOLE_SIZE but dynamic offset %#" PRIxLEAST32 ". "
2813                                                      "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
2814                                                      ") which has a size of %#" PRIxLEAST64 ".",
2815                                                      reinterpret_cast<const uint64_t &>(set_node->set), i,
2816                                                      pCB->dynamicOffsets[dynOffsetIndex], pWDS->pBufferInfo[j].offset,
2817                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2818                                }
2819                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
2820                                result |= log_msg(
2821                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2822                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2823                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
2824                                    "DS",
2825                                    "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
2826                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2827                                    " from its update, this oversteps its buffer "
2828                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2829                                    reinterpret_cast<const uint64_t &>(set_node->set), i, pCB->dynamicOffsets[dynOffsetIndex],
2830                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2831                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2832                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
2833                                result |= log_msg(
2834                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2835                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2836                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
2837                                    "DS",
2838                                    "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
2839                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2840                                    " from its update, this oversteps its buffer "
2841                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2842                                    reinterpret_cast<const uint64_t &>(set_node->set), i, pCB->dynamicOffsets[dynOffsetIndex],
2843                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2844                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2845                            }
2846                            dynOffsetIndex++;
2847                        }
2848                    } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2849                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2850                            pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2851                        }
2852                    } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2853                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2854                            assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end());
2855                            pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2856                        }
2857                    } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2858                               pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2859                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2860                            pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2861                        }
2862                    }
2863                    i += pWDS->descriptorCount; // Advance i to end of this set of descriptors (++i at end of for loop will move 1
2864                                                // index past last of these descriptors)
2865                    break;
2866                default: // Currently only shadowing Write update nodes so shouldn't get here
2867                    assert(0);
2868                    continue;
2869                }
2870            }
2871        }
2872    }
2873    return result;
2874}
2875// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
2877//   function can be killed and validate_and_update_draw_state() used instead
2878static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
2879    VkWriteDescriptorSet *pWDS = nullptr;
2880    SET_NODE *pSet = nullptr;
2881    // For the bound descriptor sets, pull off any storage images and buffers
2882    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
2883    //  pipelines
2884    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
2885        // Get the set node
2886        pSet = getSetNode(dev_data, set);
2887        // For each update in the set
2888        for (auto pUpdate : pSet->pDescriptorUpdates) {
2889            // If it's a write update to STORAGE type capture image/buffer being updated
2890            if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) {
2891                pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate);
2892                if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2893                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2894                        pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2895                    }
2896                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2897                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2898                        pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2899                    }
2900                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2901                           pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2902                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2903                        pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2904                    }
2905                }
2906            }
2907        }
2908    }
2909}
2910
// Validate overall state at the time of a draw call
//  Checks: (1) draw-state flags (graphics only), (2) that every descriptor set the
//  bound pipeline's shaders actually use is bound, layout-compatible, and updated,
//  (3) dynamic descriptor offsets (via validate_and_update_drawtime_descriptor_state),
//  and for graphics (4) vertex-buffer bindings and dynamic viewport/scissor counts.
static VkBool32 validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const VkBool32 indexedDraw,
                                               const VkPipelineBindPoint bindPoint) {
    VkBool32 result = VK_FALSE;
    // State last bound on this command buffer at the given bind point
    auto const &state = pCB->lastBound[bindPoint];
    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);

    // Now complete other state checks
    // TODO : Currently only performing next check if *something* was bound (non-zero last bound)
    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound
    //  We should have that check separately and then gate this check based on that check
    if (pPipe) {
        if (state.pipelineLayout) {
            string errorString;
            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
            // active_slots maps set index -> the bindings the pipeline's shaders actually use
            for (auto setBindingPair : pPipe->active_slots) {
                uint32_t setIndex = setBindingPair.first;
                // If valid set is not bound throw an error
                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
                                      (uint64_t)pPipe->pipeline, setIndex);
                // NOTE(review): graphicsPipelineCI.layout is referenced below even when
                //  bindPoint is COMPUTE — confirm that is intended for compute dispatch
                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
                    result |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                        "VkDescriptorSet (%#" PRIxLEAST64
                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
                } else { // Valid set is bound and layout compatible, validate that it's updated
                    // Pull the set node
                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
                    // Save vector of all active sets to verify dynamicOffsets below
                    // activeSetNodes.push_back(pSet);
                    activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
                    // Make sure set has been updated
                    if (!pSet->pUpdateStructs) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                                              "this will result in undefined behavior.",
                                          (uint64_t)pSet->set);
                    }
                }
            }
            // For given active slots, verify any dynamic descriptors and record updated images & buffers
            result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
        }
        if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
            // Verify Vtx binding
            if (pPipe->vertexBindingDescriptions.size() > 0) {
                // Every vertex binding declared by the PSO must have a buffer bound on the CB
                for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
                    if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                          __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                          "The Pipeline State Object (%#" PRIxLEAST64
                                          ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
                                          " should be set via vkCmdBindVertexBuffers.",
                                          (uint64_t)state.pipeline, i);
                    }
                }
            } else {
                // PSO uses no vertex buffers; emit a perf warning if some are bound anyway
                if (!pCB->currentDrawData.buffers.empty()) {
                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                      "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
                                      ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
                                      (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
                }
            }
            // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
            // Skip check if rasterization is disabled or there is no viewport.
            if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
                 !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
                pPipe->graphicsPipelineCI.pViewportState) {
                VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
                VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
                if (dynViewport) {
                    if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                          "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
                                          ", but PSO viewportCount is %u. These counts must match.",
                                          pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
                    }
                }
                if (dynScissor) {
                    if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                          "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
                                          ", but PSO scissorCount is %u. These counts must match.",
                                          pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
                    }
                }
            }
        }
    }
    return result;
}
3020
3021// Verify that create state for a pipeline is valid
3022static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3023                                          int pipelineIndex) {
3024    VkBool32 skipCall = VK_FALSE;
3025
3026    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3027
3028    // If create derivative bit is set, check that we've specified a base
3029    // pipeline correctly, and that the base pipeline was created to allow
3030    // derivatives.
3031    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3032        PIPELINE_NODE *pBasePipeline = nullptr;
3033        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3034              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3035            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3036                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3037                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3038        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3039            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3040                skipCall |=
3041                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3042                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3043                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3044            } else {
3045                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3046            }
3047        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3048            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3049        }
3050
3051        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3052            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3053                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3054                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3055        }
3056    }
3057
3058    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3059        if (!my_data->physDevProperties.features.independentBlend) {
3060            if (pPipeline->attachments.size() > 1) {
3061                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3062                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3063                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3064                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3065                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3066                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3067                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3068                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3069                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3070                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3071                        skipCall |=
3072                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3073                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
3074                            "enabled, all elements of pAttachments must be identical");
3075                    }
3076                }
3077            }
3078        }
3079        if (!my_data->physDevProperties.features.logicOp &&
3080            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3081            skipCall |=
3082                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3083                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3084                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3085        }
3086        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3087            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3088             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3089            skipCall |=
3090                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3091                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3092                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3093        }
3094    }
3095
3096    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3097    // produces nonsense errors that confuse users. Other layers should already
3098    // emit errors for renderpass being invalid.
3099    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
3100    if (rp_data != my_data->renderPassMap.end() &&
3101        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
3102        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3103                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3104                                                                           "is out of range for this renderpass (0..%u)",
3105                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
3106    }
3107
3108    if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) {
3109        skipCall = VK_TRUE;
3110    }
3111    // VS is required
3112    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3113        skipCall |=
3114            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3115                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3116    }
3117    // Either both or neither TC/TE shaders should be defined
3118    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3119        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3120        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3121                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3122                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3123    }
3124    // Compute shaders should be specified independent of Gfx shaders
3125    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3126        (pPipeline->active_shaders &
3127         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3128          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3129        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3130                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3131                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3132    }
3133    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3134    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3135    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3136        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3137        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3138                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3139                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3140                                                                           "topology for tessellation pipelines");
3141    }
3142    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3143        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3144            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3145                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3146                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3147                                                                               "topology is only valid for tessellation pipelines");
3148        }
3149        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
3150            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3151                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3152                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3153                                                                               "topology used with patchControlPoints value %u."
3154                                                                               " patchControlPoints should be >0 and <=32.",
3155                                pPipeline->tessStateCI.patchControlPoints);
3156        }
3157    }
3158    // Viewport state must be included if rasterization is enabled.
3159    // If the viewport state is included, the viewport and scissor counts should always match.
3160    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3161    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3162        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3163        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3164            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3165                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3166                                                                           "and scissors are dynamic PSO must include "
3167                                                                           "viewportCount and scissorCount in pViewportState.");
3168        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3169                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3170            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3171                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3172                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3173                                pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
3174        } else {
3175            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3176            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3177            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3178            if (!dynViewport) {
3179                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3180                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3181                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3182                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3183                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3184                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3185                                        "vkCmdSetViewport().",
3186                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3187                }
3188            }
3189            if (!dynScissor) {
3190                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3191                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3192                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3193                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3194                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3195                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3196                                        "vkCmdSetScissor().",
3197                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3198                }
3199            }
3200        }
3201    }
3202    return skipCall;
3203}
3204
3205// Init the pipeline mapping info based on pipeline create info LL tree
3206//  Threading note : Calls to this function should wrapped in mutex
3207// TODO : this should really just be in the constructor for PIPELINE_NODE
3208static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
3209    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
3210
3211    // First init create info
3212    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
3213
3214    size_t bufferSize = 0;
3215    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
3216    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
3217
3218    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
3219        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
3220
3221        switch (pPSSCI->stage) {
3222        case VK_SHADER_STAGE_VERTEX_BIT:
3223            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3224            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
3225            break;
3226        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
3227            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3228            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3229            break;
3230        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
3231            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3232            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
3233            break;
3234        case VK_SHADER_STAGE_GEOMETRY_BIT:
3235            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3236            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
3237            break;
3238        case VK_SHADER_STAGE_FRAGMENT_BIT:
3239            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3240            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
3241            break;
3242        case VK_SHADER_STAGE_COMPUTE_BIT:
3243            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
3244            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
3245            break;
3246        default:
3247            // TODO : Flag error
3248            break;
3249        }
3250    }
3251    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
3252    if (pCreateInfo->stageCount != 0) {
3253        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
3254        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
3255        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
3256    }
3257    if (pCreateInfo->pVertexInputState != NULL) {
3258        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
3259        // Copy embedded ptrs
3260        pVICI = pCreateInfo->pVertexInputState;
3261        if (pVICI->vertexBindingDescriptionCount) {
3262            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
3263                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
3264        }
3265        if (pVICI->vertexAttributeDescriptionCount) {
3266            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
3267                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
3268        }
3269        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
3270    }
3271    if (pCreateInfo->pInputAssemblyState != NULL) {
3272        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
3273        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
3274    }
3275    if (pCreateInfo->pTessellationState != NULL) {
3276        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
3277        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
3278    }
3279    if (pCreateInfo->pViewportState != NULL) {
3280        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
3281        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
3282    }
3283    if (pCreateInfo->pRasterizationState != NULL) {
3284        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
3285        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
3286    }
3287    if (pCreateInfo->pMultisampleState != NULL) {
3288        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
3289        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
3290    }
3291    if (pCreateInfo->pDepthStencilState != NULL) {
3292        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
3293        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
3294    }
3295    if (pCreateInfo->pColorBlendState != NULL) {
3296        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
3297        // Copy embedded ptrs
3298        pCBCI = pCreateInfo->pColorBlendState;
3299        if (pCBCI->attachmentCount) {
3300            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
3301                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
3302        }
3303        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
3304    }
3305    if (pCreateInfo->pDynamicState != NULL) {
3306        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
3307        if (pPipeline->dynStateCI.dynamicStateCount) {
3308            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
3309            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
3310            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
3311        }
3312        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
3313    }
3314    return pPipeline;
3315}
3316
3317// Free the Pipeline nodes
3318static void deletePipelines(layer_data *my_data) {
3319    if (my_data->pipelineMap.size() <= 0)
3320        return;
3321    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
3322        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
3323            delete[](*ii).second->graphicsPipelineCI.pStages;
3324        }
3325        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
3326            delete[](*ii).second->dynStateCI.pDynamicStates;
3327        }
3328        delete (*ii).second;
3329    }
3330    my_data->pipelineMap.clear();
3331}
3332
3333// For given pipeline, return number of MSAA samples, or one if MSAA disabled
3334static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
3335    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
3336    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
3337        return pPipe->msStateCI.rasterizationSamples;
3338    }
3339    return VK_SAMPLE_COUNT_1_BIT;
3340}
3341
3342// Validate state related to the PSO
3343static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
3344                                      const VkPipeline pipeline) {
3345    VkBool32 skipCall = VK_FALSE;
3346    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
3347        // Verify that any MSAA request in PSO matches sample# in bound FB
3348        // Skip the check if rasterization is disabled.
3349        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3350        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3351            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3352            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3353            if (pCB->activeRenderPass) {
3354                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3355                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3356                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3357                uint32_t i;
3358
3359                if (pPipeline->cbStateCI.attachmentCount != pSD->colorAttachmentCount) {
3360                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3361                                        reinterpret_cast<const uint64_t &>(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3362                                        "Mismatch between blend state attachment count %u and subpass %u color attachment "
3363                                        "count %u!  These must be the same.",
3364                                        pPipeline->cbStateCI.attachmentCount, pCB->activeSubpass, pSD->colorAttachmentCount);
3365                }
3366
3367                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3368                    VkSampleCountFlagBits samples;
3369
3370                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3371                        continue;
3372
3373                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3374                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3375                        subpassNumSamples = samples;
3376                    } else if (subpassNumSamples != samples) {
3377                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3378                        break;
3379                    }
3380                }
3381                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3382                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3383                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3384                        subpassNumSamples = samples;
3385                    else if (subpassNumSamples != samples)
3386                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3387                }
3388
3389                if ((pSD->colorAttachmentCount > 0 || pSD->pDepthStencilAttachment) &&
3390                    psoNumSamples != subpassNumSamples) {
3391                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3392                                        (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3393                                        "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
3394                                        ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
3395                                        (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3396                }
3397            } else {
3398                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3399                //   Verify and flag error as appropriate
3400            }
3401        }
3402        // TODO : Add more checks here
3403    } else {
3404        // TODO : Validate non-gfx pipeline updates
3405    }
3406    return skipCall;
3407}
3408
3409// Block of code at start here specifically for managing/tracking DSs
3410
3411// Return Pool node ptr for specified pool or else NULL
3412static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3413    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3414        return NULL;
3415    }
3416    return my_data->descriptorPoolMap[pool];
3417}
3418
3419static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
3420    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
3421        return NULL;
3422    }
3423    return my_data->descriptorSetLayoutMap[layout];
3424}
3425
3426// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
3427static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3428    switch (pUpdateStruct->sType) {
3429    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3430    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3431        return VK_FALSE;
3432    default:
3433        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3434                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3435                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3436                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3437    }
3438}
3439
3440// Set count for given update struct in the last parameter
3441// Return value of skipCall, which is only VK_TRUE if error occurs and callback signals execution to cease
3442static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3443    switch (pUpdateStruct->sType) {
3444    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3445        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3446    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3447        // TODO : Need to understand this case better and make sure code is correct
3448        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3449    default:
3450        return 0;
3451    }
3452    return 0;
3453}
3454
3455// For given layout and update, return the first overall index of the layout that is updated
3456static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3457                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3458    return getBindingStartIndex(pLayout, binding) + arrayIndex;
3459}
3460
3461// For given layout and update, return the last overall index of the layout that is updated
3462static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3463                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3464    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3465    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
3466}
3467
3468// Verify that the descriptor type in the update struct matches what's expected by the layout
3469static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
3470                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3471    // First get actual type of update
3472    VkBool32 skipCall = VK_FALSE;
3473    VkDescriptorType actualType;
3474    uint32_t i = 0;
3475    switch (pUpdateStruct->sType) {
3476    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3477        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3478        break;
3479    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3480        /* no need to validate */
3481        return VK_FALSE;
3482        break;
3483    default:
3484        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3485                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3486                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3487                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3488    }
3489    if (VK_FALSE == skipCall) {
3490        // Set first stageFlags as reference and verify that all other updates match it
3491        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
3492        for (i = startIndex; i <= endIndex; i++) {
3493            if (pLayout->descriptorTypes[i] != actualType) {
3494                skipCall |= log_msg(
3495                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3496                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3497                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3498                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
3499            }
3500            if (pLayout->stageFlags[i] != refStageFlags) {
3501                skipCall |= log_msg(
3502                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3503                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
3504                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
3505                    refStageFlags, pLayout->stageFlags[i]);
3506            }
3507        }
3508    }
3509    return skipCall;
3510}
3511
3512// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3513//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
3514// NOTE : Calls to this function should be wrapped in mutex
3515static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3516    VkBool32 skipCall = VK_FALSE;
3517    VkWriteDescriptorSet *pWDS = NULL;
3518    VkCopyDescriptorSet *pCDS = NULL;
3519    switch (pUpdate->sType) {
3520    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3521        pWDS = new VkWriteDescriptorSet;
3522        *pNewNode = (GENERIC_HEADER *)pWDS;
3523        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3524
3525        switch (pWDS->descriptorType) {
3526        case VK_DESCRIPTOR_TYPE_SAMPLER:
3527        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3528        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3529        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3530            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3531            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3532            pWDS->pImageInfo = info;
3533        } break;
3534        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3535        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3536            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3537            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3538            pWDS->pTexelBufferView = info;
3539        } break;
3540        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3541        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3542        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3543        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3544            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3545            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3546            pWDS->pBufferInfo = info;
3547        } break;
3548        default:
3549            return VK_ERROR_VALIDATION_FAILED_EXT;
3550            break;
3551        }
3552        break;
3553    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3554        pCDS = new VkCopyDescriptorSet;
3555        *pNewNode = (GENERIC_HEADER *)pCDS;
3556        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3557        break;
3558    default:
3559        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3560                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3561                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3562                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3563            return VK_TRUE;
3564    }
3565    // Make sure that pNext for the end of shadow copy is NULL
3566    (*pNewNode)->pNext = NULL;
3567    return skipCall;
3568}
3569
3570// Verify that given sampler is valid
3571static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
3572    VkBool32 skipCall = VK_FALSE;
3573    auto sampIt = my_data->sampleMap.find(*pSampler);
3574    if (sampIt == my_data->sampleMap.end()) {
3575        if (!immutable) {
3576            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3577                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3578                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3579                                (uint64_t)*pSampler);
3580        } else { // immutable
3581            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3582                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3583                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3584                                "sampler %#" PRIxLEAST64,
3585                                (uint64_t)*pSampler);
3586        }
3587    } else {
3588        // TODO : Any further checks we want to do on the sampler?
3589    }
3590    return skipCall;
3591}
3592
3593// find layout(s) on the cmd buf level
3594bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3595    ImageSubresourcePair imgpair = {image, true, range};
3596    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3597    if (imgsubIt == pCB->imageLayoutMap.end()) {
3598        imgpair = {image, false, VkImageSubresource()};
3599        imgsubIt = pCB->imageLayoutMap.find(imgpair);
3600        if (imgsubIt == pCB->imageLayoutMap.end())
3601            return false;
3602    }
3603    node = imgsubIt->second;
3604    return true;
3605}
3606
3607// find layout(s) on the global level
3608bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3609    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3610    if (imgsubIt == my_data->imageLayoutMap.end()) {
3611        imgpair = {imgpair.image, false, VkImageSubresource()};
3612        imgsubIt = my_data->imageLayoutMap.find(imgpair);
3613        if (imgsubIt == my_data->imageLayoutMap.end())
3614            return false;
3615    }
3616    layout = imgsubIt->second.layout;
3617    return true;
3618}
3619
3620bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3621    ImageSubresourcePair imgpair = {image, true, range};
3622    return FindLayout(my_data, imgpair, layout);
3623}
3624
3625bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3626    auto sub_data = my_data->imageSubresourceMap.find(image);
3627    if (sub_data == my_data->imageSubresourceMap.end())
3628        return false;
3629    auto imgIt = my_data->imageMap.find(image);
3630    if (imgIt == my_data->imageMap.end())
3631        return false;
3632    bool ignoreGlobal = false;
3633    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
3634    // potential errors in this case.
3635    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3636        ignoreGlobal = true;
3637    }
3638    for (auto imgsubpair : sub_data->second) {
3639        if (ignoreGlobal && !imgsubpair.hasSubresource)
3640            continue;
3641        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3642        if (img_data != my_data->imageLayoutMap.end()) {
3643            layouts.push_back(img_data->second.layout);
3644        }
3645    }
3646    return true;
3647}
3648
3649// Set the layout on the global level
3650void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3651    VkImage &image = imgpair.image;
3652    // TODO (mlentine): Maybe set format if new? Not used atm.
3653    my_data->imageLayoutMap[imgpair].layout = layout;
3654    // TODO (mlentine): Maybe make vector a set?
3655    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3656    if (subresource == my_data->imageSubresourceMap[image].end()) {
3657        my_data->imageSubresourceMap[image].push_back(imgpair);
3658    }
3659}
3660
3661// Set the layout on the cmdbuf level
3662void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3663    pCB->imageLayoutMap[imgpair] = node;
3664    // TODO (mlentine): Maybe make vector a set?
3665    auto subresource =
3666        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3667    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3668        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3669    }
3670}
3671
3672void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3673    // TODO (mlentine): Maybe make vector a set?
3674    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3675        pCB->imageSubresourceMap[imgpair.image].end()) {
3676        pCB->imageLayoutMap[imgpair].layout = layout;
3677    } else {
3678        // TODO (mlentine): Could be expensive and might need to be removed.
3679        assert(imgpair.hasSubresource);
3680        IMAGE_CMD_BUF_LAYOUT_NODE node;
3681        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3682            node.initialLayout = layout;
3683        }
3684        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3685    }
3686}
3687
3688template <class OBJECT, class LAYOUT>
3689void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3690    if (imgpair.subresource.aspectMask & aspectMask) {
3691        imgpair.subresource.aspectMask = aspectMask;
3692        SetLayout(pObject, imgpair, layout);
3693    }
3694}
3695
3696template <class OBJECT, class LAYOUT>
3697void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3698    ImageSubresourcePair imgpair = {image, true, range};
3699    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3700    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3701    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3702    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3703}
3704
3705template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3706    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3707    SetLayout(pObject, image, imgpair, layout);
3708}
3709
3710void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3711    auto image_view_data = dev_data->imageViewMap.find(imageView);
3712    assert(image_view_data != dev_data->imageViewMap.end());
3713    const VkImage &image = image_view_data->second.image;
3714    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3715    // TODO: Do not iterate over every possibility - consolidate where possible
3716    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3717        uint32_t level = subRange.baseMipLevel + j;
3718        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3719            uint32_t layer = subRange.baseArrayLayer + k;
3720            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3721            SetLayout(pCB, image, sub, layout);
3722        }
3723    }
3724}
3725
// Verify that given imageView is valid
// Reports an error if the view handle is unknown. Otherwise validates that
// imageLayout is compatible with the view's aspectMask and with the format of
// the backing image (looked up in imageMap, falling back to swapchain images).
// Returns VK_TRUE when an error was logged and the callback requested a skip.
static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
    VkBool32 skipCall = VK_FALSE;
    auto ivIt = my_data->imageViewMap.find(*pImageView);
    if (ivIt == my_data->imageViewMap.end()) {
        // View handle was never created (or has been destroyed).
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
                            (uint64_t)*pImageView);
    } else {
        // Validate that imageLayout is compatible with aspectMask and image format
        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
        VkImage image = ivIt->second.image;
        // TODO : Check here in case we have a bad image
        // VK_FORMAT_MAX_ENUM acts as the "format unresolved" sentinel below.
        VkFormat format = VK_FORMAT_MAX_ENUM;
        auto imgIt = my_data->imageMap.find(image);
        if (imgIt != my_data->imageMap.end()) {
            format = (*imgIt).second.createInfo.format;
        } else {
            // Also need to check the swapchains.
            // Swapchain-owned images are not in imageMap; their format is
            // recorded on the owning swapchain's create info instead.
            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
                VkSwapchainKHR swapchain = swapchainIt->second;
                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
                    format = pswapchain_node->createInfo.imageFormat;
                }
            }
        }
        if (format == VK_FORMAT_MAX_ENUM) {
            // Neither imageMap nor the swapchain maps know this image.
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
                                " in imageView %#" PRIxLEAST64,
                                (uint64_t)image, (uint64_t)*pImageView);
        } else {
            VkBool32 ds = vk_format_is_depth_or_stencil(format);
            switch (imageLayout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                // Only Color bit must be set
                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
                                "and imageView %#" PRIxLEAST64 ""
                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
                                (uint64_t)*pImageView);
                }
                // format must NOT be DS
                if (ds) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
                                "and imageView %#" PRIxLEAST64 ""
                                " but the image format is %s which is not a color format.",
                                (uint64_t)*pImageView, string_VkFormat(format));
                }
                break;
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                // Depth or stencil bit must be set, but both must NOT be set
                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
                        // both  must NOT be set
                        skipCall |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
                                    " that has both STENCIL and DEPTH aspects set",
                                    (uint64_t)*pImageView);
                    }
                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
                    // Neither were set
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
                                " that does not have STENCIL or DEPTH aspect set.",
                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
                }
                // format must be DS
                if (!ds) {
                    skipCall |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
                                " but the image format is %s which is not a depth/stencil format.",
                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
                }
                break;
            default:
                // anything to check for other layouts?
                break;
            }
        }
    }
    return skipCall;
}
3827
3828// Verify that given bufferView is valid
3829static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3830    VkBool32 skipCall = VK_FALSE;
3831    auto sampIt = my_data->bufferViewMap.find(*pBufferView);
3832    if (sampIt == my_data->bufferViewMap.end()) {
3833        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3834                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3835                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3836                            (uint64_t)*pBufferView);
3837    } else {
3838        // TODO : Any further checks we want to do on the bufferView?
3839    }
3840    return skipCall;
3841}
3842
3843// Verify that given bufferInfo is valid
3844static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3845    VkBool32 skipCall = VK_FALSE;
3846    auto sampIt = my_data->bufferMap.find(pBufferInfo->buffer);
3847    if (sampIt == my_data->bufferMap.end()) {
3848        skipCall |=
3849            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3850                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3851                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3852                    (uint64_t)pBufferInfo->buffer);
3853    } else {
3854        // TODO : Any further checks we want to do on the bufferView?
3855    }
3856    return skipCall;
3857}
3858
// Verify the contents of a single VkWriteDescriptorSet against the layout
// binding it targets. Depending on descriptorType, validates samplers, image
// views (+layout compatibility), buffer views, or buffer infos.
// NOTE(review): pLayoutBinding is assumed to be the binding matching
// pWDS->dstBinding — the caller performs that lookup; confirm at call sites.
// Returns VK_TRUE when a validation error occurred and the report callback
// requested the API call be skipped.
static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
    VkBool32 skipCall = VK_FALSE;
    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
    const VkSampler *pSampler = NULL;
    VkBool32 immutable = VK_FALSE;
    uint32_t i = 0;
    // For given update type, verify that update contents are correct
    switch (pWDS->descriptorType) {
    case VK_DESCRIPTOR_TYPE_SAMPLER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
        }
        break;
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        // All elements of one VkWriteDescriptorSet must agree on using either
        // immutable or non-immutable samplers; 'immutable' latches once any
        // element takes the immutable path.
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            if (NULL == pLayoutBinding->pImmutableSamplers) {
                pSampler = &(pWDS->pImageInfo[i].sampler);
                if (immutable) {
                    // A previous element used an immutable sampler — inconsistent.
                    skipCall |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
                        ", but previous update(s) from this "
                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
                        "use immutable or non-immutable samplers.",
                        i, (uint64_t)*pSampler);
                }
            } else {
                if (i > 0 && !immutable) {
                    // Earlier element(s) used a non-immutable sampler — inconsistent.
                    skipCall |= log_msg(
                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
                        "use immutable or non-immutable samplers.",
                        i);
                }
                immutable = VK_TRUE;
                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
            }
            skipCall |= validateSampler(my_data, pSampler, immutable);
        }
    // Intentionally fall through here to also validate image stuff
    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
        }
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
        }
        break;
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        for (i = 0; i < pWDS->descriptorCount; ++i) {
            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
        }
        break;
    default:
        // No content checks for remaining descriptor types.
        break;
    }
    return skipCall;
}
3929// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3930// func_str is the name of the calling function
3931// Return VK_FALSE if no errors occur
3932// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3933VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3934    VkBool32 skip_call = VK_FALSE;
3935    auto set_node = my_data->setMap.find(set);
3936    if (set_node == my_data->setMap.end()) {
3937        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3938                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3939                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3940                             (uint64_t)(set));
3941    } else {
3942        if (set_node->second->in_use.load()) {
3943            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3944                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3945                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3946                                 func_str.c_str(), (uint64_t)(set));
3947        }
3948    }
3949    return skip_call;
3950}
3951static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3952    // Flag any CBs this set is bound to as INVALID
3953    for (auto cb : pSet->boundCmdBuffers) {
3954        auto cb_node = dev_data->commandBufferMap.find(cb);
3955        if (cb_node != dev_data->commandBufferMap.end()) {
3956            cb_node->second->state = CB_INVALID;
3957        }
3958    }
3959}
3960// update DS mappings based on write and copy update arrays
3961static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3962                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3963    VkBool32 skipCall = VK_FALSE;
3964
3965    LAYOUT_NODE *pLayout = NULL;
3966    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
3967    // Validate Write updates
3968    uint32_t i = 0;
3969    for (i = 0; i < descriptorWriteCount; i++) {
3970        VkDescriptorSet ds = pWDS[i].dstSet;
3971        SET_NODE *pSet = my_data->setMap[ds];
3972        // Set being updated cannot be in-flight
3973        if ((skipCall = validateIdleDescriptorSet(my_data, ds, "VkUpdateDescriptorSets")) == VK_TRUE)
3974            return skipCall;
3975        // If set is bound to any cmdBuffers, mark them invalid
3976        invalidateBoundCmdBuffers(my_data, pSet);
3977        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3978        pLayout = pSet->pLayout;
3979        // First verify valid update struct
3980        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
3981            break;
3982        }
3983        uint32_t binding = 0, endIndex = 0;
3984        binding = pWDS[i].dstBinding;
3985        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
3986        // Make sure that layout being updated has the binding being updated
3987        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
3988            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3989                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3990                                "Descriptor Set %" PRIu64 " does not have binding to match "
3991                                "update binding %u for update type "
3992                                "%s!",
3993                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
3994        } else {
3995            // Next verify that update falls within size of given binding
3996            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
3997            if (getBindingEndIndex(pLayout, binding) < endIndex) {
3998                pLayoutCI = &pLayout->createInfo;
3999                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4000                skipCall |=
4001                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4002                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4003                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4004                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
4005            } else { // TODO : should we skip update on a type mismatch or force it?
4006                uint32_t startIndex;
4007                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4008                // Layout bindings match w/ update, now verify that update type
4009                // & stageFlags are the same for entire update
4010                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
4011                    // The update is within bounds and consistent, but need to
4012                    // make sure contents make sense as well
4013                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
4014                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
4015                        // Update is good. Save the update info
4016                        // Create new update struct for this set's shadow copy
4017                        GENERIC_HEADER *pNewNode = NULL;
4018                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
4019                        if (NULL == pNewNode) {
4020                            skipCall |= log_msg(
4021                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4022                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4023                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
4024                        } else {
4025                            // Insert shadow node into LL of updates for this set
4026                            pNewNode->pNext = pSet->pUpdateStructs;
4027                            pSet->pUpdateStructs = pNewNode;
4028                            // Now update appropriate descriptor(s) to point to new Update node
4029                            for (uint32_t j = startIndex; j <= endIndex; j++) {
4030                                assert(j < pSet->descriptorCount);
4031                                pSet->pDescriptorUpdates[j] = pNewNode;
4032                            }
4033                        }
4034                    }
4035                }
4036            }
4037        }
4038    }
4039    // Now validate copy updates
4040    for (i = 0; i < descriptorCopyCount; ++i) {
4041        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4042        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4043        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4044        // For each copy make sure that update falls within given layout and that types match
4045        pSrcSet = my_data->setMap[pCDS[i].srcSet];
4046        pDstSet = my_data->setMap[pCDS[i].dstSet];
4047        // Set being updated cannot be in-flight
4048        if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "VkUpdateDescriptorSets")) == VK_TRUE)
4049            return skipCall;
4050        invalidateBoundCmdBuffers(my_data, pDstSet);
4051        pSrcLayout = pSrcSet->pLayout;
4052        pDstLayout = pDstSet->pLayout;
4053        // Validate that src binding is valid for src set layout
4054        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4055            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4056                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4057                                "Copy descriptor update %u has srcBinding %u "
4058                                "which is out of bounds for underlying SetLayout "
4059                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4060                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
4061        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
4062            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4063                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4064                                "Copy descriptor update %u has dstBinding %u "
4065                                "which is out of bounds for underlying SetLayout "
4066                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4067                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
4068        } else {
4069            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
4070            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4071                                            (const GENERIC_HEADER *)&(pCDS[i]));
4072            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4073                                            (const GENERIC_HEADER *)&(pCDS[i]));
4074            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
4075                pLayoutCI = &pSrcLayout->createInfo;
4076                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4077                skipCall |=
4078                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4079                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4080                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4081                            pCDS[i].srcBinding, DSstr.c_str());
4082            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
4083                pLayoutCI = &pDstLayout->createInfo;
4084                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4085                skipCall |=
4086                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4087                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4088                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4089                            pCDS[i].dstBinding, DSstr.c_str());
4090            } else {
4091                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4092                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4093                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4094                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4095                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
4096                    // For copy just make sure that the types match and then perform the update
4097                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
4098                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4099                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
4100                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
4101                                            "that does not match overlapping dest descriptor type of %s!",
4102                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
4103                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
4104                    } else {
4105                        // point dst descriptor at corresponding src descriptor
4106                        // TODO : This may be a hole. I believe copy should be its own copy,
4107                        //  otherwise a subsequent write update to src will incorrectly affect the copy
4108                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
4109                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
4110                    }
4111                }
4112            }
4113        }
4114    }
4115    return skipCall;
4116}
4117
4118// Verify that given pool has descriptors that are being requested for allocation.
4119// NOTE : Calls to this function should be wrapped in mutex
4120static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
4121                                                         const VkDescriptorSetLayout *pSetLayouts) {
4122    VkBool32 skipCall = VK_FALSE;
4123    uint32_t i = 0;
4124    uint32_t j = 0;
4125
4126    // Track number of descriptorSets allowable in this pool
4127    if (pPoolNode->availableSets < count) {
4128        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4129                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4130                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
4131                            ". This pool only has %d descriptorSets remaining.",
4132                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
4133    } else {
4134        pPoolNode->availableSets -= count;
4135    }
4136
4137    for (i = 0; i < count; ++i) {
4138        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4139        if (NULL == pLayout) {
4140            skipCall |=
4141                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4142                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4143                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4144                        (uint64_t)pSetLayouts[i]);
4145        } else {
4146            uint32_t typeIndex = 0, poolSizeCount = 0;
4147            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4148                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4149                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4150                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4151                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4152                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4153                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4154                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
4155                                        ". This pool only has %d descriptors of this type remaining.",
4156                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4157                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4158                } else { // Decrement available descriptors of this type
4159                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4160                }
4161            }
4162        }
4163    }
4164    return skipCall;
4165}
4166
// Free the shadowed update nodes for this Set.
// Walks the linked list of shadow-copied update structs hanging off
// pSet->pUpdateStructs, freeing each node along with the per-descriptor-type
// payload array that was deep-copied when the update was shadowed.
// Leaves the set with no recorded updates (pUpdateStructs NULL, mapping cleared).
// NOTE : Calls to this function should be wrapped in mutex
static void freeShadowUpdateTree(SET_NODE *pSet) {
    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
    pSet->pUpdateStructs = NULL;
    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
    // Clear the descriptor mappings as they will now be invalid
    pSet->pDescriptorUpdates.clear();
    while (pShadowUpdate) {
        // Advance first so we can safely delete the node we just left
        pFreeUpdate = pShadowUpdate;
        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
        VkWriteDescriptorSet *pWDS = NULL;
        switch (pFreeUpdate->sType) {
        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
            // Only one of pImageInfo/pTexelBufferView/pBufferInfo is populated,
            // determined by descriptorType; free the matching array
            switch (pWDS->descriptorType) {
            case VK_DESCRIPTOR_TYPE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
                delete[] pWDS->pImageInfo;
            } break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
                delete[] pWDS->pTexelBufferView;
            } break;
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                delete[] pWDS->pBufferInfo;
            } break;
            default:
                break;
            }
            break;
        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
            // Copy nodes carry no separately-allocated payload
            break;
        default:
            // Shadow list should only ever contain write/copy update structs
            assert(0);
            break;
        }
        delete pFreeUpdate;
    }
}
4212
4213// Free all DS Pools including their Sets & related sub-structs
4214// NOTE : Calls to this function should be wrapped in mutex
4215static void deletePools(layer_data *my_data) {
4216    if (my_data->descriptorPoolMap.size() <= 0)
4217        return;
4218    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4219        SET_NODE *pSet = (*ii).second->pSets;
4220        SET_NODE *pFreeSet = pSet;
4221        while (pSet) {
4222            pFreeSet = pSet;
4223            pSet = pSet->pNext;
4224            // Freeing layouts handled in deleteLayouts() function
4225            // Free Update shadow struct tree
4226            freeShadowUpdateTree(pFreeSet);
4227            delete pFreeSet;
4228        }
4229        delete (*ii).second;
4230    }
4231    my_data->descriptorPoolMap.clear();
4232}
4233
4234// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
4235// NOTE : Calls to this function should be wrapped in mutex
4236static void deleteLayouts(layer_data *my_data) {
4237    if (my_data->descriptorSetLayoutMap.size() <= 0)
4238        return;
4239    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4240        LAYOUT_NODE *pLayout = (*ii).second;
4241        if (pLayout->createInfo.pBindings) {
4242            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4243                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4244            }
4245            delete[] pLayout->createInfo.pBindings;
4246        }
4247        delete pLayout;
4248    }
4249    my_data->descriptorSetLayoutMap.clear();
4250}
4251
4252// Currently clearing a set is removing all previous updates to that set
4253//  TODO : Validate if this is correct clearing behavior
4254static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4255    SET_NODE *pSet = getSetNode(my_data, set);
4256    if (!pSet) {
4257        // TODO : Return error
4258    } else {
4259        freeShadowUpdateTree(pSet);
4260    }
4261}
4262
4263static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4264                                VkDescriptorPoolResetFlags flags) {
4265    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4266    if (!pPool) {
4267        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4268                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4269                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4270    } else {
4271        // TODO: validate flags
4272        // For every set off of this pool, clear it
4273        SET_NODE *pSet = pPool->pSets;
4274        while (pSet) {
4275            clearDescriptorSet(my_data, pSet->set);
4276            pSet = pSet->pNext;
4277        }
4278        // Reset available count for each type and available sets for this pool
4279        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4280            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4281        }
4282        pPool->availableSets = pPool->maxSets;
4283    }
4284}
4285
4286// For given CB object, fetch associated CB Node from map
4287static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4288    if (my_data->commandBufferMap.count(cb) == 0) {
4289        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4290                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4291                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4292        return NULL;
4293    }
4294    return my_data->commandBufferMap[cb];
4295}
4296
4297// Free all CB Nodes
4298// NOTE : Calls to this function should be wrapped in mutex
4299static void deleteCommandBuffers(layer_data *my_data) {
4300    if (my_data->commandBufferMap.size() <= 0) {
4301        return;
4302    }
4303    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4304        delete (*ii).second;
4305    }
4306    my_data->commandBufferMap.clear();
4307}
4308
4309static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4310    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4311                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4312                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4313}
4314
4315VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4316    if (!pCB->activeRenderPass)
4317        return VK_FALSE;
4318    VkBool32 skip_call = VK_FALSE;
4319    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4320        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4321                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4322                             "Commands cannot be called in a subpass using secondary command buffers.");
4323    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4324        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4325                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4326                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4327    }
4328    return skip_call;
4329}
4330
4331static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4332    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4333        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4334                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4335                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4336    return false;
4337}
4338
4339static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4340    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4341        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4342                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4343                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4344    return false;
4345}
4346
4347static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4348    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4349        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4350                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4351                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4352    return false;
4353}
4354
4355// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4356//  in the recording state or if there's an issue with the Cmd ordering
4357static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4358    VkBool32 skipCall = VK_FALSE;
4359    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4360    if (pool_data != my_data->commandPoolMap.end()) {
4361        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4362        switch (cmd) {
4363        case CMD_BINDPIPELINE:
4364        case CMD_BINDPIPELINEDELTA:
4365        case CMD_BINDDESCRIPTORSETS:
4366        case CMD_FILLBUFFER:
4367        case CMD_CLEARCOLORIMAGE:
4368        case CMD_SETEVENT:
4369        case CMD_RESETEVENT:
4370        case CMD_WAITEVENTS:
4371        case CMD_BEGINQUERY:
4372        case CMD_ENDQUERY:
4373        case CMD_RESETQUERYPOOL:
4374        case CMD_COPYQUERYPOOLRESULTS:
4375        case CMD_WRITETIMESTAMP:
4376            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4377            break;
4378        case CMD_SETVIEWPORTSTATE:
4379        case CMD_SETSCISSORSTATE:
4380        case CMD_SETLINEWIDTHSTATE:
4381        case CMD_SETDEPTHBIASSTATE:
4382        case CMD_SETBLENDSTATE:
4383        case CMD_SETDEPTHBOUNDSSTATE:
4384        case CMD_SETSTENCILREADMASKSTATE:
4385        case CMD_SETSTENCILWRITEMASKSTATE:
4386        case CMD_SETSTENCILREFERENCESTATE:
4387        case CMD_BINDINDEXBUFFER:
4388        case CMD_BINDVERTEXBUFFER:
4389        case CMD_DRAW:
4390        case CMD_DRAWINDEXED:
4391        case CMD_DRAWINDIRECT:
4392        case CMD_DRAWINDEXEDINDIRECT:
4393        case CMD_BLITIMAGE:
4394        case CMD_CLEARATTACHMENTS:
4395        case CMD_CLEARDEPTHSTENCILIMAGE:
4396        case CMD_RESOLVEIMAGE:
4397        case CMD_BEGINRENDERPASS:
4398        case CMD_NEXTSUBPASS:
4399        case CMD_ENDRENDERPASS:
4400            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4401            break;
4402        case CMD_DISPATCH:
4403        case CMD_DISPATCHINDIRECT:
4404            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4405            break;
4406        case CMD_COPYBUFFER:
4407        case CMD_COPYIMAGE:
4408        case CMD_COPYBUFFERTOIMAGE:
4409        case CMD_COPYIMAGETOBUFFER:
4410        case CMD_CLONEIMAGEDATA:
4411        case CMD_UPDATEBUFFER:
4412        case CMD_PIPELINEBARRIER:
4413        case CMD_EXECUTECOMMANDS:
4414            break;
4415        default:
4416            break;
4417        }
4418    }
4419    if (pCB->state != CB_RECORDING) {
4420        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
4421        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
4422        CMD_NODE cmdNode = {};
4423        // init cmd node and append to end of cmd LL
4424        cmdNode.cmdNumber = ++pCB->numCmds;
4425        cmdNode.type = cmd;
4426        pCB->cmds.push_back(cmdNode);
4427    }
4428    return skipCall;
4429}
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
    if (pCB) {
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewports.clear();
        pCB->scissors.clear();
        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
                auto set_node = my_data->setMap.find(set);
                if (set_node != my_data->setMap.end()) {
                    // Drop this CB's back-reference so the set no longer records it as bound
                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
                }
            }
            pCB->lastBound[i].reset();
        }
        // Clear render pass / subpass / framebuffer tracking
        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = 0;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->framebuffer = 0;
        // Clear submission/fence tracking
        pCB->fenceId = 0;
        pCB->lastSubmittedFence = VK_NULL_HANDLE;
        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
        pCB->destroyedSets.clear();
        pCB->updatedSets.clear();
        pCB->destroyedFramebuffers.clear();
        // Clear event, semaphore, and query bookkeeping
        pCB->waitedEvents.clear();
        pCB->semaphores.clear();
        pCB->events.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        // Clear primary/secondary CB relationships and deferred-validation state
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        pCB->validate_functions.clear();
        pCB->memObjs.clear();
        pCB->eventUpdates.clear();
    }
}
4488
4489// Set PSO-related status bits for CB, including dynamic state set via PSO
4490static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4491    // Account for any dynamic state not set via this PSO
4492    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
4493        pCB->status = CBSTATUS_ALL;
4494    } else {
4495        // First consider all state on
4496        // Then unset any state that's noted as dynamic in PSO
4497        // Finally OR that into CB statemask
4498        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4499        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
4500            switch (pPipe->dynStateCI.pDynamicStates[i]) {
4501            case VK_DYNAMIC_STATE_VIEWPORT:
4502                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4503                break;
4504            case VK_DYNAMIC_STATE_SCISSOR:
4505                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4506                break;
4507            case VK_DYNAMIC_STATE_LINE_WIDTH:
4508                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4509                break;
4510            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4511                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4512                break;
4513            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4514                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4515                break;
4516            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4517                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4518                break;
4519            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4520                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4521                break;
4522            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4523                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4524                break;
4525            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4526                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4527                break;
4528            default:
4529                // TODO : Flag error here
4530                break;
4531            }
4532        }
4533        pCB->status |= psoDynStateMask;
4534    }
4535}
4536
4537// Print the last bound Gfx Pipeline
4538static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4539    VkBool32 skipCall = VK_FALSE;
4540    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4541    if (pCB) {
4542        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
4543        if (!pPipeTrav) {
4544            // nothing to print
4545        } else {
4546            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4547                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
4548                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
4549        }
4550    }
4551    return skipCall;
4552}
4553
4554static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4555    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4556    if (pCB && pCB->cmds.size() > 0) {
4557        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4558                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
4559        vector<CMD_NODE> cmds = pCB->cmds;
4560        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4561            // TODO : Need to pass cb as srcObj here
4562            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4563                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4564        }
4565    } else {
4566        // Nothing to print
4567    }
4568}
4569
4570static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4571    VkBool32 skipCall = VK_FALSE;
4572    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4573        return skipCall;
4574    }
4575    skipCall |= printPipeline(my_data, cb);
4576    return skipCall;
4577}
4578
4579// Flags validation error if the associated call is made inside a render pass. The apiName
4580// routine should ONLY be called outside a render pass.
4581static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4582    VkBool32 inside = VK_FALSE;
4583    if (pCB->activeRenderPass) {
4584        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4585                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4586                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4587                         (uint64_t)pCB->activeRenderPass);
4588    }
4589    return inside;
4590}
4591
4592// Flags validation error if the associated call is made outside a render pass. The apiName
4593// routine should ONLY be called inside a render pass.
4594static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4595    VkBool32 outside = VK_FALSE;
4596    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4597        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4598         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4599        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4600                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4601                          "%s: This call must be issued inside an active render pass.", apiName);
4602    }
4603    return outside;
4604}
4605
// Per-instance layer initialization: configure debug-report actions from the
// layer settings/environment and create the global lock on first use.
static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");

    if (!globalLockInitialized) {
        // First instance creates the process-wide mutex guarding layer state maps
        loader_platform_thread_create_mutex(&globalLock);
        globalLockInitialized = 1;
    }
#if MTMERGESOURCE
    // Zero out memory property data
    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
#endif
}
4619
4620VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4621vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4622    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4623
4624    assert(chain_info->u.pLayerInfo);
4625    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4626    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4627    if (fpCreateInstance == NULL)
4628        return VK_ERROR_INITIALIZATION_FAILED;
4629
4630    // Advance the link info for the next element on the chain
4631    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4632
4633    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4634    if (result != VK_SUCCESS)
4635        return result;
4636
4637    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4638    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4639    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
4640
4641    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
4642                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4643
4644    init_core_validation(my_data, pAllocator);
4645
4646    ValidateLayerOrdering(*pCreateInfo);
4647
4648    return result;
4649}
4650
4651/* hook DestroyInstance to remove tableInstanceMap entry */
4652VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4653    // TODOSC : Shouldn't need any customization here
4654    dispatch_key key = get_dispatch_key(instance);
4655    // TBD: Need any locking this early, in case this function is called at the
4656    // same time by more than one thread?
4657    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4658    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4659    pTable->DestroyInstance(instance, pAllocator);
4660
4661    loader_platform_thread_lock_mutex(&globalLock);
4662    // Clean up logging callback, if any
4663    while (my_data->logging_callback.size() > 0) {
4664        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4665        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4666        my_data->logging_callback.pop_back();
4667    }
4668
4669    layer_debug_report_destroy_instance(my_data->report_data);
4670    delete my_data->instance_dispatch_table;
4671    layer_data_map.erase(key);
4672    loader_platform_thread_unlock_mutex(&globalLock);
4673    if (layer_data_map.empty()) {
4674        // Release mutex when destroying last instance.
4675        loader_platform_thread_delete_mutex(&globalLock);
4676        globalLockInitialized = 0;
4677    }
4678}
4679
4680static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4681    uint32_t i;
4682    // TBD: Need any locking, in case this function is called at the same time
4683    // by more than one thread?
4684    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4685    dev_data->device_extensions.wsi_enabled = false;
4686
4687    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4688    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4689    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4690    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4691    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4692    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4693    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4694
4695    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4696        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4697            dev_data->device_extensions.wsi_enabled = true;
4698    }
4699}
4700
// Create the device via the next layer in the chain, then initialize this
// layer's per-device state: dispatch table, debug report wrapper, extension
// flags, and cached physical-device properties/queue-family info.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    // Resolve next-layer entry points from the layer chain
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    loader_platform_thread_lock_mutex(&globalLock);
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    // Setup device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
    // Two-call pattern: first query the count, then fill the resized vector
    uint32_t count;
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->physDevProperties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
    } else {
        // No features requested: treat all features as disabled
        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}
4751
4752// prototype
4753static void deleteRenderPasses(layer_data *);
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // Tear down all per-device validation state, report any memory objects the
    // app never freed (when the merged mem_tracker code is compiled in), then
    // forward the destroy down the layer chain and drop this device's
    // layer_data entry.
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    loader_platform_thread_lock_mutex(&globalLock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    deletePools(dev_data);
    deleteLayouts(dev_data);
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
#if MTMERGESOURCE
    // mem_tracker-merged path: dump tracked memory/CB lists and flag leaks
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data, device);
    printCBList(dev_data, device);
    delete_cmd_buf_info_list(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (dev_data->memObjMap.size() > 0) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = &(*ii).second;
            // A non-zero allocationSize marks an allocation the app never freed
            if (pInfo->allocInfo.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    // Queues persist until device is destroyed
    delete_queue_info_list(dev_data);
    layer_debug_report_destroy_device(device);
    loader_platform_thread_unlock_mutex(&globalLock);

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: %p, key: %p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    // NOTE(review): when a leak error was returned un-filtered (skipCall set),
    // the down-chain DestroyDevice is skipped entirely, so the device is never
    // actually destroyed -- confirm this is the intended layer behavior.
    if (VK_FALSE == skipCall) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}
4816
4817#if MTMERGESOURCE
4818VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
4819vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
4820    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
4821    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
4822    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
4823    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
4824}
4825#endif
4826
4827static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4828
4829VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4830vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4831    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4832}
4833
4834VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4835vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4836    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4837}
4838
// TODO: Why does this exist - can we just use global?
// Device-level layer property table reported by
// vkEnumerateDeviceLayerProperties; its single entry matches the layer's
// global (instance) identity.
static const VkLayerProperties cv_device_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};
4843
4844VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4845                                                                                    const char *pLayerName, uint32_t *pCount,
4846                                                                                    VkExtensionProperties *pProperties) {
4847    if (pLayerName == NULL) {
4848        dispatch_key key = get_dispatch_key(physicalDevice);
4849        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4850        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4851    } else {
4852        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4853    }
4854}
4855
4856VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4857vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4858    /* draw_state physical device layers are the same as global */
4859    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4860}
4861
4862// This validates that the initial layout specified in the command buffer for
4863// the IMAGE is the same
4864// as the global IMAGE layout
4865VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
4866    VkBool32 skip_call = VK_FALSE;
4867    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
4868    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
4869    for (auto cb_image_data : pCB->imageLayoutMap) {
4870        VkImageLayout imageLayout;
4871        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4872            skip_call |=
4873                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4874                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4875                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4876        } else {
4877            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4878                // TODO: Set memory invalid which is in mem_tracker currently
4879            } else if (imageLayout != cb_image_data.second.initialLayout) {
4880                skip_call |=
4881                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4882                            reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4883                            "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
4884                            "first use is %s.",
4885                            reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4886                            string_VkImageLayout(cb_image_data.second.initialLayout));
4887            }
4888            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4889        }
4890    }
4891    return skip_call;
4892}
4893
4894// Track which resources are in-flight by atomically incrementing their "in_use" count
4895VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4896    VkBool32 skip_call = VK_FALSE;
4897    for (auto drawDataElement : pCB->drawData) {
4898        for (auto buffer : drawDataElement.buffers) {
4899            auto buffer_data = my_data->bufferMap.find(buffer);
4900            if (buffer_data == my_data->bufferMap.end()) {
4901                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4902                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4903                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4904            } else {
4905                buffer_data->second.in_use.fetch_add(1);
4906            }
4907        }
4908    }
4909    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4910        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4911            auto setNode = my_data->setMap.find(set);
4912            if (setNode == my_data->setMap.end()) {
4913                skip_call |=
4914                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4915                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4916                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4917            } else {
4918                setNode->second->in_use.fetch_add(1);
4919            }
4920        }
4921    }
4922    for (auto semaphore : pCB->semaphores) {
4923        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4924        if (semaphoreNode == my_data->semaphoreMap.end()) {
4925            skip_call |=
4926                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4927                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4928                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4929        } else {
4930            semaphoreNode->second.in_use.fetch_add(1);
4931        }
4932    }
4933    for (auto event : pCB->events) {
4934        auto eventNode = my_data->eventMap.find(event);
4935        if (eventNode == my_data->eventMap.end()) {
4936            skip_call |=
4937                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4938                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4939                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
4940        } else {
4941            eventNode->second.in_use.fetch_add(1);
4942        }
4943    }
4944    return skip_call;
4945}
4946
4947void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4948    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4949    for (auto drawDataElement : pCB->drawData) {
4950        for (auto buffer : drawDataElement.buffers) {
4951            auto buffer_data = my_data->bufferMap.find(buffer);
4952            if (buffer_data != my_data->bufferMap.end()) {
4953                buffer_data->second.in_use.fetch_sub(1);
4954            }
4955        }
4956    }
4957    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4958        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4959            auto setNode = my_data->setMap.find(set);
4960            if (setNode != my_data->setMap.end()) {
4961                setNode->second->in_use.fetch_sub(1);
4962            }
4963        }
4964    }
4965    for (auto semaphore : pCB->semaphores) {
4966        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4967        if (semaphoreNode != my_data->semaphoreMap.end()) {
4968            semaphoreNode->second.in_use.fetch_sub(1);
4969        }
4970    }
4971    for (auto event : pCB->events) {
4972        auto eventNode = my_data->eventMap.find(event);
4973        if (eventNode != my_data->eventMap.end()) {
4974            eventNode->second.in_use.fetch_sub(1);
4975        }
4976    }
4977    for (auto queryStatePair : pCB->queryToStateMap) {
4978        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4979    }
4980    for (auto eventStagePair : pCB->eventToStageMap) {
4981        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4982    }
4983}
4984
4985void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4986    for (uint32_t i = 0; i < fenceCount; ++i) {
4987        auto fence_data = my_data->fenceMap.find(pFences[i]);
4988        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4989            return;
4990        fence_data->second.needsSignaled = false;
4991        fence_data->second.in_use.fetch_sub(1);
4992        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
4993        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4994            decrementResources(my_data, cmdBuffer);
4995        }
4996    }
4997}
4998
4999void decrementResources(layer_data *my_data, VkQueue queue) {
5000    auto queue_data = my_data->queueMap.find(queue);
5001    if (queue_data != my_data->queueMap.end()) {
5002        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
5003            decrementResources(my_data, cmdBuffer);
5004        }
5005        queue_data->second.untrackedCmdBuffers.clear();
5006        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
5007    }
5008}
5009
5010void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
5011    if (queue == other_queue) {
5012        return;
5013    }
5014    auto queue_data = dev_data->queueMap.find(queue);
5015    auto other_queue_data = dev_data->queueMap.find(other_queue);
5016    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
5017        return;
5018    }
5019    for (auto fence : other_queue_data->second.lastFences) {
5020        queue_data->second.lastFences.push_back(fence);
5021    }
5022    if (fence != VK_NULL_HANDLE) {
5023        auto fence_data = dev_data->fenceMap.find(fence);
5024        if (fence_data == dev_data->fenceMap.end()) {
5025            return;
5026        }
5027        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5028            fence_data->second.cmdBuffers.push_back(cmdbuffer);
5029        }
5030        other_queue_data->second.untrackedCmdBuffers.clear();
5031    } else {
5032        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5033            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
5034        }
5035        other_queue_data->second.untrackedCmdBuffers.clear();
5036    }
5037    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
5038        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
5039    }
5040}
5041
void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    // Record which command buffers (primaries plus their secondaries) are now
    // in flight on 'queue'. With a fence, the CBs and the queue's prior fences
    // are tracked against that fence; without one, the CBs go onto the queue's
    // "untracked" list until a later fenced submit adopts them.
    auto queue_data = my_data->queueMap.find(queue);
    if (fence != VK_NULL_HANDLE) {
        vector<VkFence> prior_fences;
        auto fence_data = my_data->fenceMap.find(fence);
        if (fence_data == my_data->fenceMap.end()) {
            // Unknown fence -- nothing can be tracked against it
            return;
        }
        if (queue_data != my_data->queueMap.end()) {
            // This fence supersedes the queue's previous fences and adopts any
            // command buffers submitted without a fence since then
            prior_fences = queue_data->second.lastFences;
            queue_data->second.lastFences.clear();
            queue_data->second.lastFences.push_back(fence);
            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
                fence_data->second.cmdBuffers.push_back(cmdbuffer);
            }
            queue_data->second.untrackedCmdBuffers.clear();
        }
        // NOTE(review): this clear() discards the untracked CBs pushed onto
        // fence_data just above -- confirm whether that adoption is intended
        // to survive, or the push/clear ordering is a latent bug.
        fence_data->second.cmdBuffers.clear();
        fence_data->second.priorFences = prior_fences;
        fence_data->second.needsSignaled = true;
        fence_data->second.queue = queue;
        fence_data->second.in_use.fetch_add(1);
        // Track every submitted primary CB and its secondaries on the fence
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
                }
                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
            }
        }
    } else {
        // Fenceless submit: remember the CBs on the queue until a fence adopts them
        if (queue_data != my_data->queueMap.end()) {
            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
                const VkSubmitInfo *submit = &pSubmits[submit_idx];
                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
                    }
                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
                }
            }
        }
    }
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to both the global set and queue set
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
            }
        }
    }
}
5101
5102bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5103    bool skip_call = false;
5104    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
5105        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5106        skip_call |=
5107            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5108                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
5109                    "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.",
5110                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
5111    }
5112    return skip_call;
5113}
5114
static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Verify the CB is in the submittable CB_RECORDED state; if it is INVALID,
    // report the most specific known cause (destroyed sets, updated sets,
    // destroyed framebuffers), otherwise report the missing vkEndCommandBuffer.
    bool skipCall = false;
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            //  flagged if we hit this INVALID cmd buffer case and none of the
            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seemlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should "
                    "be improved to report the exact cause.",
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skipCall;
}
5184
5185static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5186    // Track in-use for resources off of primary and any secondary CBs
5187    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
5188    if (!pCB->secondaryCommandBuffers.empty()) {
5189        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5190            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5191            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5192            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
5193                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5194                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5195                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5196                        " but that buffer has subsequently been bound to "
5197                        "primary cmd buffer %#" PRIxLEAST64 ".",
5198                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5199                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5200            }
5201        }
5202    }
5203    // TODO : Verify if this also needs to be checked for secondary command
5204    //  buffers. If so, this block of code can move to
5205    //   validateCommandBufferState() function. vulkan GL106 filed to clarify
5206    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5207        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5208                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5209                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
5210                            "set, but has been submitted %#" PRIxLEAST64 " times.",
5211                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5212    }
5213    skipCall |= validateCommandBufferState(dev_data, pCB);
5214    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5215    // on device
5216    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5217    return skipCall;
5218}
5219
5220VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5221vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5222    VkBool32 skipCall = VK_FALSE;
5223    GLOBAL_CB_NODE *pCBNode = NULL;
5224    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5225    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5226    loader_platform_thread_lock_mutex(&globalLock);
5227#if MTMERGESOURCE
5228    // TODO : Need to track fence and clear mem references when fence clears
5229    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5230    uint64_t fenceId = 0;
5231    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5232
5233    print_mem_list(dev_data, queue);
5234    printCBList(dev_data, queue);
5235    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5236        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5237        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5238            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5239            if (pCBNode) {
5240                pCBNode->fenceId = fenceId;
5241                pCBNode->lastSubmittedFence = fence;
5242                pCBNode->lastSubmittedQueue = queue;
5243                for (auto &function : pCBNode->validate_functions) {
5244                    skipCall |= function();
5245                }
5246                for (auto &function : pCBNode->eventUpdates) {
5247                    skipCall |= static_cast<VkBool32>(function(queue));
5248                }
5249            }
5250        }
5251
5252        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5253            VkSemaphore sem = submit->pWaitSemaphores[i];
5254
5255            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5256                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
5257                    skipCall =
5258                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5259                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
5260                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5261                }
5262                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5263            }
5264        }
5265        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5266            VkSemaphore sem = submit->pSignalSemaphores[i];
5267
5268            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5269                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
5270                    skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5271                                       VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
5272                                       "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5273                }
5274                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5275            }
5276        }
5277    }
5278#endif
5279    // First verify that fence is not in use
5280    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5281        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5282                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5283                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5284    }
5285    // Now verify each individual submit
5286    std::unordered_set<VkQueue> processed_other_queues;
5287    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5288        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5289        vector<VkSemaphore> semaphoreList;
5290        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5291            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5292            if (dev_data->semaphoreMap[semaphore].signaled) {
5293                dev_data->semaphoreMap[semaphore].signaled = 0;
5294                dev_data->semaphoreMap[semaphore].in_use.fetch_sub(1);
5295            } else {
5296                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5297                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5298                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5299                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5300            }
5301            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5302            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5303                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5304                processed_other_queues.insert(other_queue);
5305            }
5306        }
5307        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5308            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5309            semaphoreList.push_back(semaphore);
5310            if (dev_data->semaphoreMap[semaphore].signaled) {
5311                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5312                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5313                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5314                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5315                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5316                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5317            } else {
5318                dev_data->semaphoreMap[semaphore].signaled = 1;
5319                dev_data->semaphoreMap[semaphore].queue = queue;
5320            }
5321        }
5322        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5323            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5324            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5325            pCBNode->semaphores = semaphoreList;
5326            pCBNode->submitCount++; // increment submit count
5327            skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5328        }
5329    }
5330    // Update cmdBuffer-related data structs and mark fence in-use
5331    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5332    loader_platform_thread_unlock_mutex(&globalLock);
5333    if (VK_FALSE == skipCall)
5334        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5335#if MTMERGESOURCE
5336    loader_platform_thread_lock_mutex(&globalLock);
5337    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5338        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5339        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5340            VkSemaphore sem = submit->pWaitSemaphores[i];
5341
5342            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5343                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
5344            }
5345        }
5346    }
5347    loader_platform_thread_unlock_mutex(&globalLock);
5348#endif
5349    return result;
5350}
5351
5352#if MTMERGESOURCE
5353VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5354                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5355    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5356    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5357    // TODO : Track allocations and overall size here
5358    loader_platform_thread_lock_mutex(&globalLock);
5359    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5360    print_mem_list(my_data, device);
5361    loader_platform_thread_unlock_mutex(&globalLock);
5362    return result;
5363}
5364
5365VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5366vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5367    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5368
5369    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5370    // Before freeing a memory object, an application must ensure the memory object is no longer
5371    // in use by the device—for example by command buffers queued for execution. The memory need
5372    // not yet be unbound from all images and buffers, but any further use of those images or
5373    // buffers (on host or device) for anything other than destroying those objects will result in
5374    // undefined behavior.
5375
5376    loader_platform_thread_lock_mutex(&globalLock);
5377    freeMemObjInfo(my_data, device, mem, VK_FALSE);
5378    print_mem_list(my_data, device);
5379    printCBList(my_data, device);
5380    loader_platform_thread_unlock_mutex(&globalLock);
5381    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5382}
5383
5384VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5385    VkBool32 skipCall = VK_FALSE;
5386
5387    if (size == 0) {
5388        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5389        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5390                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5391                           "VkMapMemory: Attempting to map memory range of size zero");
5392    }
5393
5394    auto mem_element = my_data->memObjMap.find(mem);
5395    if (mem_element != my_data->memObjMap.end()) {
5396        // It is an application error to call VkMapMemory on an object that is already mapped
5397        if (mem_element->second.memRange.size != 0) {
5398            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5399                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5400                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5401        }
5402
5403        // Validate that offset + size is within object's allocationSize
5404        if (size == VK_WHOLE_SIZE) {
5405            if (offset >= mem_element->second.allocInfo.allocationSize) {
5406                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5407                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5408                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset,
5409                                   mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
5410            }
5411        } else {
5412            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5413                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5414                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5415                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total array size %" PRIu64, offset,
5416                                   size + offset, mem_element->second.allocInfo.allocationSize);
5417            }
5418        }
5419    }
5420    return skipCall;
5421}
5422
5423void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5424    auto mem_element = my_data->memObjMap.find(mem);
5425    if (mem_element != my_data->memObjMap.end()) {
5426        MemRange new_range;
5427        new_range.offset = offset;
5428        new_range.size = size;
5429        mem_element->second.memRange = new_range;
5430    }
5431}
5432
5433VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5434    VkBool32 skipCall = VK_FALSE;
5435    auto mem_element = my_data->memObjMap.find(mem);
5436    if (mem_element != my_data->memObjMap.end()) {
5437        if (!mem_element->second.memRange.size) {
5438            // Valid Usage: memory must currently be mapped
5439            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5440                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5441                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5442        }
5443        mem_element->second.memRange.size = 0;
5444        if (mem_element->second.pData) {
5445            free(mem_element->second.pData);
5446            mem_element->second.pData = 0;
5447        }
5448    }
5449    return skipCall;
5450}
5451
5452static char NoncoherentMemoryFillValue = 0xb;
5453
5454void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5455    auto mem_element = my_data->memObjMap.find(mem);
5456    if (mem_element != my_data->memObjMap.end()) {
5457        mem_element->second.pDriverData = *ppData;
5458        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5459        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5460            mem_element->second.pData = 0;
5461        } else {
5462            if (size == VK_WHOLE_SIZE) {
5463                size = mem_element->second.allocInfo.allocationSize;
5464            }
5465            size_t convSize = (size_t)(size);
5466            mem_element->second.pData = malloc(2 * convSize);
5467            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5468            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5469        }
5470    }
5471}
5472#endif
5473// Note: This function assumes that the global lock is held by the calling
5474// thread.
5475VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5476    VkBool32 skip_call = VK_FALSE;
5477    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5478    if (pCB) {
5479        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
5480            for (auto event : queryEventsPair.second) {
5481                if (my_data->eventMap[event].needsSignaled) {
5482                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5483                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
5484                                         "Cannot get query results on queryPool %" PRIu64
5485                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
5486                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
5487                }
5488            }
5489        }
5490    }
5491    return skip_call;
5492}
5493// Remove given cmd_buffer from the global inFlight set.
5494//  Also, if given queue is valid, then remove the cmd_buffer from that queues
5495//  inFlightCmdBuffer set. Finally, check all other queues and if given cmd_buffer
5496//  is still in flight on another queue, add it back into the global set.
5497// Note: This function assumes that the global lock is held by the calling
5498// thread.
5499static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
5500    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
5501    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5502    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5503        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
5504        for (auto q : dev_data->queues) {
5505            if ((q != queue) &&
5506                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
5507                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
5508                break;
5509            }
5510        }
5511    }
5512}
5513#if MTMERGESOURCE
5514static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
5515    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5516    VkBool32 skipCall = false;
5517    auto pFenceInfo = my_data->fenceMap.find(fence);
5518    if (pFenceInfo != my_data->fenceMap.end()) {
5519        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
5520            if ((pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) &&
5521                pFenceInfo->second.firstTimeFlag != VK_TRUE) {
5522                skipCall |=
5523                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5524                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5525                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5526            }
5527            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5528                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5529                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5530                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5531                                    "acquire next image.",
5532                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5533            }
5534        } else {
5535            pFenceInfo->second.firstTimeFlag = VK_FALSE;
5536        }
5537    }
5538    return skipCall;
5539}
5540#endif
// Validate the fences being waited on, forward the wait to the driver, and —
// when the wait guarantees completion — retire the command buffers tracked
// against those fences. Returns VK_ERROR_VALIDATION_FAILED_EXT if any
// validation message fired.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skip_call = VK_FALSE;
#if MTMERGESOURCE
    // Verify fence status of submitted fences
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
#endif
    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        // When we know that all fences are complete we can clean/remove their CBs
        // (waitAll, or a wait on a single fence, guarantees every listed fence signaled)
        if (waitAll || fenceCount == 1) {
            for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
                update_fence_tracking(dev_data, pFences[i]);
#endif
                // Retire each command buffer recorded against this fence.
                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
                }
            }
            decrementResources(dev_data, fenceCount, pFences);
        }
        // NOTE : Alternate case not handled here is when some fences have completed. In
        //  this case for app to guarantee which fences completed it will have to call
        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}
5582
5583VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5584    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5585    bool skipCall = false;
5586    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5587#if MTMERGESOURCE
5588    loader_platform_thread_lock_mutex(&globalLock);
5589    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
5590    loader_platform_thread_unlock_mutex(&globalLock);
5591    if (skipCall)
5592        return result;
5593#endif
5594    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5595    VkBool32 skip_call = VK_FALSE;
5596    loader_platform_thread_lock_mutex(&globalLock);
5597    if (result == VK_SUCCESS) {
5598#if MTMERGESOURCE
5599        update_fence_tracking(dev_data, fence);
5600#endif
5601        auto fence_queue = dev_data->fenceMap[fence].queue;
5602        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
5603            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5604            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5605        }
5606        decrementResources(dev_data, 1, &fence);
5607    }
5608    loader_platform_thread_unlock_mutex(&globalLock);
5609    if (VK_FALSE != skip_call)
5610        return VK_ERROR_VALIDATION_FAILED_EXT;
5611    return result;
5612}
5613
5614VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5615                                                            VkQueue *pQueue) {
5616    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5617    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5618    loader_platform_thread_lock_mutex(&globalLock);
5619
5620    // Add queue to tracking set only if it is new
5621    auto result = dev_data->queues.emplace(*pQueue);
5622    if (result.second == true) {
5623        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5624        pQNode->device = device;
5625#if MTMERGESOURCE
5626        pQNode->lastRetiredId = 0;
5627        pQNode->lastSubmittedId = 0;
5628#endif
5629    }
5630
5631    loader_platform_thread_unlock_mutex(&globalLock);
5632}
5633
// Retire all tracked state for command buffers in flight on "queue", forward
// the wait to the driver, and on success retire the queue's fences.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    // NOTE(review): decrementResources runs before globalLock is taken below,
    // unlike vkDeviceWaitIdle which calls it under the lock — confirm it is
    // safe without the lock here.
    decrementResources(dev_data, queue);
    VkBool32 skip_call = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
    // Iterate over local set since we erase set members as we go in for loop
    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
    for (auto cmdBuffer : local_cb_set) {
        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
    }
    // removeInFlightCmdBuffer erased each entry already; clear() is a safety net.
    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
#if MTMERGESOURCE
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        retire_queue_fences(dev_data, queue);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}
5659
5660VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5661    VkBool32 skip_call = VK_FALSE;
5662    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5663    loader_platform_thread_lock_mutex(&globalLock);
5664    for (auto queue : dev_data->queues) {
5665        decrementResources(dev_data, queue);
5666        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5667            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
5668            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5669        }
5670    }
5671    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5672        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5673    }
5674    dev_data->globalInFlightCmdBuffers.clear();
5675    loader_platform_thread_unlock_mutex(&globalLock);
5676    if (VK_FALSE != skip_call)
5677        return VK_ERROR_VALIDATION_FAILED_EXT;
5678    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5679#if MTMERGESOURCE
5680    if (VK_SUCCESS == result) {
5681        loader_platform_thread_lock_mutex(&globalLock);
5682        retire_device_fences(dev_data, device);
5683        loader_platform_thread_unlock_mutex(&globalLock);
5684    }
5685#endif
5686    return result;
5687}
5688
// Validate that "fence" is not referenced by in-flight work, drop the layer's
// tracking entry, and destroy it via the driver only if validation passed.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    // NOTE: operator[] default-constructs a fenceMap entry if this fence is
    // unknown; the entry is erased again below while MTMERGESOURCE is on.
    if (dev_data->fenceMap[fence].in_use.load()) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
    }
#if MTMERGESOURCE
    // Remove mem-tracker bookkeeping and the tracking map entry itself.
    delete_fence_info(dev_data, fence);
    auto item = dev_data->fenceMap.find(fence);
    if (item != dev_data->fenceMap.end()) {
        dev_data->fenceMap.erase(item);
    }
#endif
    loader_platform_thread_unlock_mutex(&globalLock);
    // Skip the driver call entirely when the fence is still in use.
    if (!skipCall)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}
5709
5710VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5711vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5712    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5713    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5714    loader_platform_thread_lock_mutex(&globalLock);
5715    auto item = dev_data->semaphoreMap.find(semaphore);
5716    if (item != dev_data->semaphoreMap.end()) {
5717        if (item->second.in_use.load()) {
5718            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5719                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5720                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5721        }
5722        dev_data->semaphoreMap.erase(semaphore);
5723    }
5724    loader_platform_thread_unlock_mutex(&globalLock);
5725    // TODO : Clean up any internal data structures using this obj.
5726}
5727
5728VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5729    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5730    bool skip_call = false;
5731    loader_platform_thread_lock_mutex(&globalLock);
5732    auto event_data = dev_data->eventMap.find(event);
5733    if (event_data != dev_data->eventMap.end()) {
5734        if (event_data->second.in_use.load()) {
5735            skip_call |= log_msg(
5736                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5737                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5738                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5739        }
5740        dev_data->eventMap.erase(event_data);
5741    }
5742    loader_platform_thread_unlock_mutex(&globalLock);
5743    if (!skip_call)
5744        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5745    // TODO : Clean up any internal data structures using this obj.
5746}
5747
5748VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5749vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5750    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5751        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5752    // TODO : Clean up any internal data structures using this obj.
5753}
5754
5755VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5756                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5757                                                     VkQueryResultFlags flags) {
5758    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5759    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5760    GLOBAL_CB_NODE *pCB = nullptr;
5761    loader_platform_thread_lock_mutex(&globalLock);
5762    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5763        pCB = getCBNode(dev_data, cmdBuffer);
5764        for (auto queryStatePair : pCB->queryToStateMap) {
5765            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5766        }
5767    }
5768    VkBool32 skip_call = VK_FALSE;
5769    for (uint32_t i = 0; i < queryCount; ++i) {
5770        QueryObject query = {queryPool, firstQuery + i};
5771        auto queryElement = queriesInFlight.find(query);
5772        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5773        if (queryToStateElement != dev_data->queryToStateMap.end()) {
5774            // Available and in flight
5775            if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5776                queryToStateElement->second) {
5777                for (auto cmdBuffer : queryElement->second) {
5778                    pCB = getCBNode(dev_data, cmdBuffer);
5779                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5780                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5781                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5782                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5783                                             "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5784                                             (uint64_t)(queryPool), firstQuery + i);
5785                    } else {
5786                        for (auto event : queryEventElement->second) {
5787                            dev_data->eventMap[event].needsSignaled = true;
5788                        }
5789                    }
5790                }
5791                // Unavailable and in flight
5792            } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5793                       !queryToStateElement->second) {
5794                // TODO : Can there be the same query in use by multiple command buffers in flight?
5795                bool make_available = false;
5796                for (auto cmdBuffer : queryElement->second) {
5797                    pCB = getCBNode(dev_data, cmdBuffer);
5798                    make_available |= pCB->queryToStateMap[query];
5799                }
5800                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5801                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5802                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5803                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5804                                         (uint64_t)(queryPool), firstQuery + i);
5805                }
5806                // Unavailable
5807            } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5808                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5809                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5810                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5811                                     (uint64_t)(queryPool), firstQuery + i);
5812                // Unitialized
5813            } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5814                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5815                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5816                                     "Cannot get query results on queryPool %" PRIu64
5817                                     " with index %d as data has not been collected for this index.",
5818                                     (uint64_t)(queryPool), firstQuery + i);
5819            }
5820        }
5821    }
5822    loader_platform_thread_unlock_mutex(&globalLock);
5823    if (skip_call)
5824        return VK_ERROR_VALIDATION_FAILED_EXT;
5825    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5826                                                                flags);
5827}
5828
5829VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5830    VkBool32 skip_call = VK_FALSE;
5831    auto buffer_data = my_data->bufferMap.find(buffer);
5832    if (buffer_data == my_data->bufferMap.end()) {
5833        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5834                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5835                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5836    } else {
5837        if (buffer_data->second.in_use.load()) {
5838            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5839                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5840                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5841        }
5842    }
5843    return skip_call;
5844}
5845
// Destroy a buffer: clear any mem-tracker binding, verify the buffer is idle
// and known, forward the destroy down the chain, then drop the layer's
// tracking entry. globalLock is released around the down-chain call and
// re-acquired for the final map erase.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // Mem-tracker: unbind any memory bound to this buffer and forget the binding.
    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
    if (item != dev_data->bufferBindingMap.end()) {
        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
        dev_data->bufferBindingMap.erase(item);
    }
#endif
    // Only call down the chain when the buffer is idle (not referenced by an
    // in-flight command buffer) and no mem-tracker error was raised above.
    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        loader_platform_thread_lock_mutex(&globalLock);
    }
    // The tracking entry is removed even when the down-chain destroy was skipped.
    dev_data->bufferMap.erase(buffer);
    loader_platform_thread_unlock_mutex(&globalLock);
}
5866
5867VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5868vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5869    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5870    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5871    loader_platform_thread_lock_mutex(&globalLock);
5872    auto item = dev_data->bufferViewMap.find(bufferView);
5873    if (item != dev_data->bufferViewMap.end()) {
5874        dev_data->bufferViewMap.erase(item);
5875    }
5876    loader_platform_thread_unlock_mutex(&globalLock);
5877}
5878
// Destroy an image: clear any mem-tracker binding, forward the destroy down the
// chain (unless the binding cleanup reported an error), then remove the image
// from the layer's image, subresource, and layout tracking maps.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    // Mem-tracker: unbind any memory bound to this image and forget the binding.
    auto item = dev_data->imageBindingMap.find((uint64_t)image);
    if (item != dev_data->imageBindingMap.end()) {
        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        dev_data->imageBindingMap.erase(item);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    // Call down the chain only if the binding cleanup above raised no error.
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);

    loader_platform_thread_lock_mutex(&globalLock);
    const auto& entry = dev_data->imageMap.find(image);
    if (entry != dev_data->imageMap.end()) {
        // Clear any memory mapping for this image
        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
        if (mem_entry != dev_data->memObjMap.end())
            mem_entry->second.image = VK_NULL_HANDLE;

        // Remove image from imageMap
        dev_data->imageMap.erase(entry);
    }
    // Drop every per-subresource layout record kept for this image.
    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
    if (subEntry != dev_data->imageSubresourceMap.end()) {
        for (const auto& pair : subEntry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(subEntry);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
}
5914#if MTMERGESOURCE
5915VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5916                                  VkDebugReportObjectTypeEXT object_type) {
5917    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5918        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5919                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is alised with image %" PRIx64, object_handle,
5920                       other_handle);
5921    } else {
5922        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5923                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is alised with buffer %" PRIx64, object_handle,
5924                       other_handle);
5925    }
5926}
5927
5928VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5929                               VkDebugReportObjectTypeEXT object_type) {
5930    VkBool32 skip_call = false;
5931
5932    for (auto range : ranges) {
5933        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
5934            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5935            continue;
5936        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
5937            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5938            continue;
5939        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5940    }
5941    return skip_call;
5942}
5943
5944VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5945                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
5946                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
5947    MEMORY_RANGE range;
5948    range.handle = handle;
5949    range.memory = mem;
5950    range.start = memoryOffset;
5951    range.end = memoryOffset + memRequirements.size - 1;
5952    ranges.push_back(range);
5953    return validate_memory_range(dev_data, other_ranges, range, object_type);
5954}
5955
5956VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5957vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5958    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5959    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5960    loader_platform_thread_lock_mutex(&globalLock);
5961    // Track objects tied to memory
5962    uint64_t buffer_handle = (uint64_t)(buffer);
5963    VkBool32 skipCall =
5964        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5965    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
5966    {
5967        VkMemoryRequirements memRequirements;
5968        // MTMTODO : Shouldn't this call down the chain?
5969        vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
5970        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
5971                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
5972                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5973        // Validate memory requirements alignment
5974        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5975            skipCall |=
5976                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5977                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5978                        "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be an integer multiple of the "
5979                        "VkMemoryRequirements::alignment value %#" PRIxLEAST64
5980                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5981                        memoryOffset, memRequirements.alignment);
5982        }
5983        // Validate device limits alignments
5984        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].create_info->usage;
5985        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
5986            if (vk_safe_modulo(memoryOffset, dev_data->physDevProperties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
5987                skipCall |=
5988                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5989                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5990                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
5991                            "device limit minTexelBufferOffsetAlignment %#" PRIxLEAST64,
5992                            memoryOffset, dev_data->physDevProperties.properties.limits.minTexelBufferOffsetAlignment);
5993            }
5994        }
5995        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
5996            if (vk_safe_modulo(memoryOffset, dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
5997                skipCall |=
5998                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5999                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6000                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
6001                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
6002                            memoryOffset, dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
6003            }
6004        }
6005        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
6006            if (vk_safe_modulo(memoryOffset, dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6007                skipCall |=
6008                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
6009                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6010                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
6011                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
6012                            memoryOffset, dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
6013            }
6014        }
6015    }
6016    print_mem_list(dev_data, device);
6017    loader_platform_thread_unlock_mutex(&globalLock);
6018    if (VK_FALSE == skipCall) {
6019        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
6020    }
6021    return result;
6022}
6023
6024VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6025vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
6026    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6027    // TODO : What to track here?
6028    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
6029    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
6030}
6031
6032VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6033vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
6034    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6035    // TODO : What to track here?
6036    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
6037    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
6038}
6039#endif
6040VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6041vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
6042    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6043        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
6044    // TODO : Clean up any internal data structures using this obj.
6045}
6046
6047VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6048vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6049    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6050
6051    loader_platform_thread_lock_mutex(&globalLock);
6052
6053    my_data->shaderModuleMap.erase(shaderModule);
6054
6055    loader_platform_thread_unlock_mutex(&globalLock);
6056
6057    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
6058}
6059
6060VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6061vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6062    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
6063    // TODO : Clean up any internal data structures using this obj.
6064}
6065
6066VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6067vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6068    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6069        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6070    // TODO : Clean up any internal data structures using this obj.
6071}
6072
6073VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6074vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6075    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
6076    // TODO : Clean up any internal data structures using this obj.
6077}
6078
6079VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6080vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6081    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6082        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6083    // TODO : Clean up any internal data structures using this obj.
6084}
6085
6086VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6087vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6088    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6089        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
6090    // TODO : Clean up any internal data structures using this obj.
6091}
6092
// Free the given command buffers: clear mem-tracker references, flag any buffer
// still in flight as an error, delete the layer's per-CB state, and detach each
// CB from its pool. The down-chain free is skipped when any error was logged.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
#if MTMERGESOURCE
        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
#endif
        // Freeing a command buffer that is still tracked as in flight is an error.
        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
        }
        // Delete CB information structure, and remove from commandBufferMap
        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        if (cb != dev_data->commandBufferMap.end()) {
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb).second->commandBuffer);
            delete (*cb).second;
            dev_data->commandBufferMap.erase(cb);
        }

        // Remove commandBuffer reference from commandPoolMap
        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
    }
#if MTMERGESOURCE
    printCBList(dev_data, device);
#endif
    loader_platform_thread_unlock_mutex(&globalLock);

    if (!skip_call)
        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
6130
6131VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6132                                                                   const VkAllocationCallbacks *pAllocator,
6133                                                                   VkCommandPool *pCommandPool) {
6134    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6135
6136    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6137
6138    if (VK_SUCCESS == result) {
6139        loader_platform_thread_lock_mutex(&globalLock);
6140        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6141        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6142        loader_platform_thread_unlock_mutex(&globalLock);
6143    }
6144    return result;
6145}
6146
6147VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6148                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6149
6150    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6151    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6152    if (result == VK_SUCCESS) {
6153        loader_platform_thread_lock_mutex(&globalLock);
6154        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
6155        loader_platform_thread_unlock_mutex(&globalLock);
6156    }
6157    return result;
6158}
6159
6160VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
6161    VkBool32 skipCall = VK_FALSE;
6162    auto pool_data = dev_data->commandPoolMap.find(commandPool);
6163    if (pool_data != dev_data->commandPoolMap.end()) {
6164        for (auto cmdBuffer : pool_data->second.commandBuffers) {
6165            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
6166                skipCall |=
6167                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
6168                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
6169                            "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
6170                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
6171            }
6172        }
6173    }
6174    return skipCall;
6175}
6176
6177// Destroy commandPool along with all of the commandBuffers allocated from that pool
6178VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6179vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6180    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6181    bool commandBufferComplete = false;
6182    bool skipCall = false;
6183    loader_platform_thread_lock_mutex(&globalLock);
6184#if MTMERGESOURCE
6185    // Verify that command buffers in pool are complete (not in-flight)
6186    // MTMTODO : Merge this with code below (separate *NotInUse() call)
6187    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6188         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
6189        commandBufferComplete = VK_FALSE;
6190        skipCall = checkCBCompleted(dev_data, *it, &commandBufferComplete);
6191        if (VK_FALSE == commandBufferComplete) {
6192            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6193                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6194                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
6195                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
6196                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
6197        }
6198    }
6199#endif
6200    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap
6201    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
6202        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6203             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
6204            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
6205            delete (*del_cb).second;                  // delete CB info structure
6206            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
6207            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
6208                poolCb); // Remove CB reference from commandPoolMap's list
6209        }
6210    }
6211    dev_data->commandPoolMap.erase(commandPool);
6212
6213    loader_platform_thread_unlock_mutex(&globalLock);
6214
6215    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
6216        return;
6217
6218    if (!skipCall)
6219        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
6220#if MTMERGESOURCE
6221    loader_platform_thread_lock_mutex(&globalLock);
6222    auto item = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6223    // Remove command buffers from command buffer map
6224    while (item != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6225        auto del_item = item++;
6226        delete_cmd_buf_info(dev_data, commandPool, *del_item);
6227    }
6228    dev_data->commandPoolMap.erase(commandPool);
6229    loader_platform_thread_unlock_mutex(&globalLock);
6230#endif
6231}
6232
6233VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6234vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6235    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6236    bool commandBufferComplete = false;
6237    bool skipCall = false;
6238    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6239#if MTMERGESOURCE
6240    // MTMTODO : Merge this with *NotInUse() call below
6241    loader_platform_thread_lock_mutex(&globalLock);
6242    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6243    // Verify that CB's in pool are complete (not in-flight)
6244    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6245        skipCall = checkCBCompleted(dev_data, (*it), &commandBufferComplete);
6246        if (!commandBufferComplete) {
6247            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6248                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6249                                "Resetting CB %p before it has completed. You must check CB "
6250                                "flag before calling vkResetCommandBuffer().",
6251                                (*it));
6252        } else {
6253            // Clear memory references at this point.
6254            clear_cmd_buf_and_mem_references(dev_data, (*it));
6255        }
6256        ++it;
6257    }
6258    loader_platform_thread_unlock_mutex(&globalLock);
6259#endif
6260    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
6261        return VK_ERROR_VALIDATION_FAILED_EXT;
6262
6263    if (!skipCall)
6264        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
6265
6266    // Reset all of the CBs allocated from this pool
6267    if (VK_SUCCESS == result) {
6268        loader_platform_thread_lock_mutex(&globalLock);
6269        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6270        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6271            resetCB(dev_data, (*it));
6272            ++it;
6273        }
6274        loader_platform_thread_unlock_mutex(&globalLock);
6275    }
6276    return result;
6277}
6278
6279VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6280    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6281    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6282    bool skipCall = false;
6283    loader_platform_thread_lock_mutex(&globalLock);
6284    for (uint32_t i = 0; i < fenceCount; ++i) {
6285#if MTMERGESOURCE
6286        // Reset fence state in fenceCreateInfo structure
6287        // MTMTODO : Merge with code below
6288        auto fence_item = dev_data->fenceMap.find(pFences[i]);
6289        if (fence_item != dev_data->fenceMap.end()) {
6290            // Validate fences in SIGNALED state
6291            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
6292                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
6293                skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6294                                   (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
6295                                   "Fence %#" PRIxLEAST64 " submitted to VkResetFences in UNSIGNALED STATE", (uint64_t)pFences[i]);
6296            } else {
6297                fence_item->second.createInfo.flags =
6298                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
6299            }
6300        }
6301#endif
6302        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
6303            skipCall |=
6304                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6305                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6306                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
6307        }
6308    }
6309    loader_platform_thread_unlock_mutex(&globalLock);
6310    if (!skipCall)
6311        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6312    return result;
6313}
6314
// Destroy a framebuffer. Every command buffer that recorded a reference to it
// is marked CB_INVALID (and remembers the destroyed framebuffer so a later
// submit can be diagnosed), then the layer's tracking entry is freed and the
// destroy is forwarded down the chain.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
            }
        }
        // NOTE(review): pAttachments is deleted here, so the tracking node
        // presumably owns a deep copy made at framebuffer creation -- verify
        // against vkCreateFramebuffer.
        delete [] fbNode->second.createInfo.pAttachments;
        dev_data->frameBufferMap.erase(fbNode);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}
6335
6336VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6337vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6338    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6339    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6340    loader_platform_thread_lock_mutex(&globalLock);
6341    dev_data->renderPassMap.erase(renderPass);
6342    loader_platform_thread_unlock_mutex(&globalLock);
6343}
6344
6345VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6346                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6347    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6348
6349    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6350
6351    if (VK_SUCCESS == result) {
6352        loader_platform_thread_lock_mutex(&globalLock);
6353#if MTMERGESOURCE
6354        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
6355#endif
6356        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6357        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
6358        dev_data->bufferMap[*pBuffer].in_use.store(0);
6359        loader_platform_thread_unlock_mutex(&globalLock);
6360    }
6361    return result;
6362}
6363
6364VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6365                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6366    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6367    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6368    if (VK_SUCCESS == result) {
6369        loader_platform_thread_lock_mutex(&globalLock);
6370        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6371#if MTMERGESOURCE
6372        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6373        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6374        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
6375                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
6376                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6377#endif
6378        loader_platform_thread_unlock_mutex(&globalLock);
6379    }
6380    return result;
6381}
6382
6383VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6384                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6385    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6386
6387    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6388
6389    if (VK_SUCCESS == result) {
6390        loader_platform_thread_lock_mutex(&globalLock);
6391#if MTMERGESOURCE
6392        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
6393#endif
6394        IMAGE_LAYOUT_NODE image_node;
6395        image_node.layout = pCreateInfo->initialLayout;
6396        image_node.format = pCreateInfo->format;
6397        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6398        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6399        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6400        dev_data->imageLayoutMap[subpair] = image_node;
6401        loader_platform_thread_unlock_mutex(&globalLock);
6402    }
6403    return result;
6404}
6405
6406static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6407    /* expects globalLock to be held by caller */
6408
6409    auto image_node_it = dev_data->imageMap.find(image);
6410    if (image_node_it != dev_data->imageMap.end()) {
6411        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6412         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6413         * the actual values.
6414         */
6415        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6416            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6417        }
6418
6419        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6420            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6421        }
6422    }
6423}
6424
6425// Return the correct layer/level counts if the caller used the special
6426// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6427static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6428                                         VkImage image) {
6429    /* expects globalLock to be held by caller */
6430
6431    *levels = range.levelCount;
6432    *layers = range.layerCount;
6433    auto image_node_it = dev_data->imageMap.find(image);
6434    if (image_node_it != dev_data->imageMap.end()) {
6435        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6436            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6437        }
6438        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6439            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6440        }
6441    }
6442}
6443
6444VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6445                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6446    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6447    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6448    if (VK_SUCCESS == result) {
6449        loader_platform_thread_lock_mutex(&globalLock);
6450        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6451        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6452        dev_data->imageViewMap[*pView] = localCI;
6453#if MTMERGESOURCE
6454        // Validate that img has correct usage flags set
6455        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
6456                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6457                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6458                                   VK_FALSE, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
6459#endif
6460        loader_platform_thread_unlock_mutex(&globalLock);
6461    }
6462    return result;
6463}
6464
// Create a fence and, on success, register a FENCE_NODE for layer-side tracking.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // operator[] default-constructs the tracking node on first use.
        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
#if MTMERGESOURCE
        // NOTE(review): zeroes sizeof(MT_FENCE_INFO) bytes of a FENCE_NODE -- only safe
        // if MT_FENCE_INFO is a plain-data leading portion of FENCE_NODE; confirm types.
        memset(pFN, 0, sizeof(MT_FENCE_INFO));
        // Shadow the app's create info for later validation.
        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            // Fence starts signaled; presumably consumers of firstTimeFlag use this to
            // special-case the first wait/status query -- verify against its readers.
            pFN->firstTimeFlag = VK_TRUE;
        }
#endif
        // No command buffers reference this fence yet.
        pFN->in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
6484
6485// TODO handle pipeline caches
6486VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6487                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6488    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6489    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6490    return result;
6491}
6492
6493VKAPI_ATTR void VKAPI_CALL
6494vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6495    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6496    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6497}
6498
6499VKAPI_ATTR VkResult VKAPI_CALL
6500vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6501    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6502    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6503    return result;
6504}
6505
6506VKAPI_ATTR VkResult VKAPI_CALL
6507vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6508    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6509    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6510    return result;
6511}
6512
6513// utility function to set collective state for pipeline
6514void set_pipeline_state(PIPELINE_NODE *pPipe) {
6515    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6516    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6517        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6518            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6519                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6520                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6521                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6522                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6523                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6524                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6525                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6526                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6527                    pPipe->blendConstantsEnabled = true;
6528                }
6529            }
6530        }
6531    }
6532}
6533
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    VkBool32 skipCall = VK_FALSE;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);

    // Steps 1 & 2: shadow and validate each pipeline's create state under the lock.
    for (i = 0; i < count; i++) {
        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (VK_FALSE == skipCall) {
        // Step 3: drop the lock around the down-chain call, then re-take it to record
        // the driver-assigned handles in pipelineMap.
        // NOTE(review): if the down-chain call fails, pPipelines[] entries may be
        // VK_NULL_HANDLE yet are still inserted into pipelineMap -- confirm intended.
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        // Validation failed: free the shadow nodes and skip the down-chain call.
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}
6576
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                         VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    VkBool32 skipCall = VK_FALSE;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        // Shadow the app's create info into the tracking node (raw byte copy).
        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (VK_FALSE == skipCall) {
        // skipCall is currently always VK_FALSE (no compute validation yet); the branch
        // mirrors vkCreateGraphicsPipelines for when checks are added.
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        // Record the driver-assigned handle for each shadow node.
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}
6621
6622VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6623                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6624    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6625    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6626    if (VK_SUCCESS == result) {
6627        loader_platform_thread_lock_mutex(&globalLock);
6628        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6629        loader_platform_thread_unlock_mutex(&globalLock);
6630    }
6631    return result;
6632}
6633
6634VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6635vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6636                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6637    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6638    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6639    if (VK_SUCCESS == result) {
6640        // TODOSC : Capture layout bindings set
6641        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
6642        if (NULL == pNewNode) {
6643            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
6644                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6645                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
6646                return VK_ERROR_VALIDATION_FAILED_EXT;
6647        }
6648        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
6649        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
6650        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
6651               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
6652        // g++ does not like reserve with size 0
6653        if (pCreateInfo->bindingCount)
6654            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
6655        uint32_t totalCount = 0;
6656        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6657            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
6658                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6659                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
6660                            DRAWSTATE_INVALID_LAYOUT, "DS", "duplicated binding number in "
6661                                                            "VkDescriptorSetLayoutBinding"))
6662                    return VK_ERROR_VALIDATION_FAILED_EXT;
6663            } else {
6664                pNewNode->bindingToIndexMap[pCreateInfo->pBindings[i].binding] = i;
6665            }
6666            totalCount += pCreateInfo->pBindings[i].descriptorCount;
6667            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
6668                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
6669                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
6670                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
6671                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
6672            }
6673        }
6674        pNewNode->layout = *pSetLayout;
6675        pNewNode->startIndex = 0;
6676        if (totalCount > 0) {
6677            pNewNode->descriptorTypes.resize(totalCount);
6678            pNewNode->stageFlags.resize(totalCount);
6679            uint32_t offset = 0;
6680            uint32_t j = 0;
6681            VkDescriptorType dType;
6682            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
6683                dType = pCreateInfo->pBindings[i].descriptorType;
6684                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
6685                    pNewNode->descriptorTypes[offset + j] = dType;
6686                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
6687                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
6688                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
6689                        pNewNode->dynamicDescriptorCount++;
6690                    }
6691                }
6692                offset += j;
6693            }
6694            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
6695        } else { // no descriptors
6696            pNewNode->endIndex = 0;
6697        }
6698        // Put new node at Head of global Layer list
6699        loader_platform_thread_lock_mutex(&globalLock);
6700        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
6701        loader_platform_thread_unlock_mutex(&globalLock);
6702    }
6703    return result;
6704}
6705
6706static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6707                                     const char *caller_name) {
6708    bool skipCall = false;
6709    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
6710        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6711                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6712                                                                 "exceeds this device's maxPushConstantSize of %u.",
6713                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
6714    }
6715    return skipCall;
6716}
6717
6718VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6719                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6720    bool skipCall = false;
6721    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6722    uint32_t i = 0;
6723    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6724        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6725                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
6726        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
6727            skipCall |=
6728                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6729                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
6730                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6731                        i, pCreateInfo->pPushConstantRanges[i].size);
6732        }
6733        // TODO : Add warning if ranges overlap
6734    }
6735    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6736    if (VK_SUCCESS == result) {
6737        loader_platform_thread_lock_mutex(&globalLock);
6738        // TODOSC : Merge capture of the setLayouts per pipeline
6739        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6740        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6741        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6742            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6743        }
6744        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6745        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6746            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6747        }
6748        loader_platform_thread_unlock_mutex(&globalLock);
6749    }
6750    return result;
6751}
6752
6753VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6754vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6755                       VkDescriptorPool *pDescriptorPool) {
6756    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6757    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6758    if (VK_SUCCESS == result) {
6759        // Insert this pool into Global Pool LL at head
6760        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6761                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
6762                    (uint64_t)*pDescriptorPool))
6763            return VK_ERROR_VALIDATION_FAILED_EXT;
6764        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6765        if (NULL == pNewNode) {
6766            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6767                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6768                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6769                return VK_ERROR_VALIDATION_FAILED_EXT;
6770        } else {
6771            loader_platform_thread_lock_mutex(&globalLock);
6772            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6773            loader_platform_thread_unlock_mutex(&globalLock);
6774        }
6775    } else {
6776        // Need to do anything if pool create fails?
6777    }
6778    return result;
6779}
6780
6781VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6782vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6783    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6784    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6785    if (VK_SUCCESS == result) {
6786        loader_platform_thread_lock_mutex(&globalLock);
6787        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6788        loader_platform_thread_unlock_mutex(&globalLock);
6789    }
6790    return result;
6791}
6792
// Allocate descriptor sets after checking the pool has capacity; on success,
// create a SET_NODE per set (linked into the pool's set list) for tracking.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // Re-fetch under the re-acquired lock (shadows the pPoolNode found above).
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                // NOTE(review): descriptorSetCount (0 here) is passed as the object
                // handle argument of log_msg -- looks unintentional; confirm.
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                // Create new set node and add to head of pool nodes
                SET_NODE *pNewNode = new SET_NODE;
                if (NULL == pNewNode) {
                    // NOTE(review): dead branch -- operator new throws rather than
                    // returning NULL; kept to match the file's pattern.
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
                        loader_platform_thread_unlock_mutex(&globalLock);
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. One reset/free need to bump count back up.
                    // Insert set at head of Set LL for this pool
                    pNewNode->pNext = pPoolNode->pSets;
                    pNewNode->in_use.store(0);
                    pPoolNode->pSets = pNewNode;
                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
                    if (NULL == pLayout) {
                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                                    "Unable to find set layout node for layout %#" PRIxLEAST64
                                    " specified in vkAllocateDescriptorSets() call",
                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
                            loader_platform_thread_unlock_mutex(&globalLock);
                            return VK_ERROR_VALIDATION_FAILED_EXT;
                        }
                    }
                    // NOTE(review): if pLayout is NULL and log_msg returned false, the
                    // dereferences below would crash -- confirm that path is unreachable.
                    pNewNode->pLayout = pLayout;
                    pNewNode->pool = pAllocateInfo->descriptorPool;
                    pNewNode->set = pDescriptorSets[i];
                    // endIndex is inclusive, so +1 yields the flat descriptor count.
                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
                    if (pNewNode->descriptorCount) {
                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
                    }
                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
6872
6873VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6874vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6875    VkBool32 skipCall = VK_FALSE;
6876    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6877    // Make sure that no sets being destroyed are in-flight
6878    loader_platform_thread_lock_mutex(&globalLock);
6879    for (uint32_t i = 0; i < count; ++i)
6880        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDesriptorSets");
6881    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6882    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6883        // Can't Free from a NON_FREE pool
6884        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6885                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6886                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6887                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6888    }
6889    loader_platform_thread_unlock_mutex(&globalLock);
6890    if (VK_FALSE != skipCall)
6891        return VK_ERROR_VALIDATION_FAILED_EXT;
6892    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6893    if (VK_SUCCESS == result) {
6894        loader_platform_thread_lock_mutex(&globalLock);
6895
6896        // Update available descriptor sets in pool
6897        pPoolNode->availableSets += count;
6898
6899        // For each freed descriptor add it back into the pool as available
6900        for (uint32_t i = 0; i < count; ++i) {
6901            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
6902            invalidateBoundCmdBuffers(dev_data, pSet);
6903            LAYOUT_NODE *pLayout = pSet->pLayout;
6904            uint32_t typeIndex = 0, poolSizeCount = 0;
6905            for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
6906                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
6907                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
6908                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
6909            }
6910        }
6911        loader_platform_thread_unlock_mutex(&globalLock);
6912    }
6913    // TODO : Any other clean-up or book-keeping to do here?
6914    return result;
6915}
6916
6917VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6918vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6919                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6920    // dsUpdate will return VK_TRUE only if a bailout error occurs, so we want to call down tree when update returns VK_FALSE
6921    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6922    loader_platform_thread_lock_mutex(&globalLock);
6923    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6924    loader_platform_thread_unlock_mutex(&globalLock);
6925    if (!rtn) {
6926        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6927                                                              pDescriptorCopies);
6928    }
6929}
6930
6931VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6932vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6933    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6934    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6935    if (VK_SUCCESS == result) {
6936        loader_platform_thread_lock_mutex(&globalLock);
6937        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6938        if (cp_it != dev_data->commandPoolMap.end()) {
6939            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6940                // Add command buffer to its commandPool map
6941                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6942                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6943                // Add command buffer to map
6944                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6945                resetCB(dev_data, pCommandBuffer[i]);
6946                pCB->createInfo = *pCreateInfo;
6947                pCB->device = device;
6948            }
6949        }
6950#if MTMERGESOURCE
6951        printCBList(dev_data, device);
6952#endif
6953        loader_platform_thread_unlock_mutex(&globalLock);
6954    }
6955    return result;
6956}
6957
6958VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6959vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6960    VkBool32 skipCall = VK_FALSE;
6961    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6962    loader_platform_thread_lock_mutex(&globalLock);
6963    // Validate command buffer level
6964    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6965    if (pCB) {
6966#if MTMERGESOURCE
6967        bool commandBufferComplete = false;
6968        // MTMTODO : Merge this with code below
6969        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6970        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
6971
6972        if (!commandBufferComplete) {
6973            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6974                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6975                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
6976                                "You must check CB flag before this call.",
6977                                commandBuffer);
6978        }
6979#endif
6980        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6981            // Secondary Command Buffer
6982            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6983            if (!pInfo) {
6984                skipCall |=
6985                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6986                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6987                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
6988                            reinterpret_cast<void *>(commandBuffer));
6989            } else {
6990                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6991                    if (!pInfo->renderPass) { // renderpass should NOT be null for an Secondary CB
6992                        skipCall |= log_msg(
6993                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6994                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6995                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
6996                            reinterpret_cast<void *>(commandBuffer));
6997                    }
6998                    if (!pInfo->framebuffer) { // framebuffer may be null for an Secondary CB, but this affects perf
6999                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7000                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7001                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
7002                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
7003                                                  "valid framebuffer parameter is specified.",
7004                                            reinterpret_cast<void *>(commandBuffer));
7005                    } else {
7006                        string errorString = "";
7007                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
7008                        if (fbNode != dev_data->frameBufferMap.end()) {
7009                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
7010                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
7011                                // renderPass that framebuffer was created with
7012                                // must
7013                                // be compatible with local renderPass
7014                                skipCall |=
7015                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7016                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7017                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
7018                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
7019                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
7020                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
7021                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
7022                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
7023                            }
7024                            // Connect this framebuffer to this cmdBuffer
7025                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
7026                        }
7027                    }
7028                }
7029                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
7030                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
7031                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7032                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7033                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7034                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7035                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
7036                                        "VK_QUERY_CONTROL_PRECISE_BIT if occulusionQuery is disabled or the device does not "
7037                                        "support precise occlusion queries.",
7038                                        reinterpret_cast<void *>(commandBuffer));
7039                }
7040            }
7041            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
7042                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
7043                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
7044                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
7045                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7046                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7047                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7048                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must has a subpass index (%d) "
7049                                            "that is less than the number of subpasses (%d).",
7050                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
7051                    }
7052                }
7053            }
7054        }
7055        if (CB_RECORDING == pCB->state) {
7056            skipCall |=
7057                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7058                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7059                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
7060                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7061                        (uint64_t)commandBuffer);
7062        } else if (CB_RECORDED == pCB->state) {
7063            VkCommandPool cmdPool = pCB->createInfo.commandPool;
7064            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7065                skipCall |=
7066                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7067                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7068                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
7069                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
7070                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7071                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7072            }
7073            resetCB(dev_data, commandBuffer);
7074        }
7075        // Set updated state here in case implicit reset occurs above
7076        pCB->state = CB_RECORDING;
7077        pCB->beginInfo = *pBeginInfo;
7078        if (pCB->beginInfo.pInheritanceInfo) {
7079            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
7080            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
7081        }
7082    } else {
7083        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7084                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7085                            "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
7086    }
7087    loader_platform_thread_unlock_mutex(&globalLock);
7088    if (VK_FALSE != skipCall) {
7089        return VK_ERROR_VALIDATION_FAILED_EXT;
7090    }
7091    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
7092#if MTMERGESOURCE
7093    loader_platform_thread_lock_mutex(&globalLock);
7094    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7095    loader_platform_thread_unlock_mutex(&globalLock);
7096#endif
7097    return result;
7098}
7099
7100VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7101    VkBool32 skipCall = VK_FALSE;
7102    VkResult result = VK_SUCCESS;
7103    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7104    loader_platform_thread_lock_mutex(&globalLock);
7105    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7106    if (pCB) {
7107        if (pCB->state != CB_RECORDING) {
7108            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7109        }
7110        for (auto query : pCB->activeQueries) {
7111            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7112                                DRAWSTATE_INVALID_QUERY, "DS",
7113                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
7114                                (uint64_t)(query.pool), query.index);
7115        }
7116    }
7117    if (VK_FALSE == skipCall) {
7118        loader_platform_thread_unlock_mutex(&globalLock);
7119        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7120        loader_platform_thread_lock_mutex(&globalLock);
7121        if (VK_SUCCESS == result) {
7122            pCB->state = CB_RECORDED;
7123            // Reset CB status flags
7124            pCB->status = 0;
7125            printCB(dev_data, commandBuffer);
7126        }
7127    } else {
7128        result = VK_ERROR_VALIDATION_FAILED_EXT;
7129    }
7130    loader_platform_thread_unlock_mutex(&globalLock);
7131    return result;
7132}
7133
7134VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7135vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7136    VkBool32 skipCall = VK_FALSE;
7137    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7138    loader_platform_thread_lock_mutex(&globalLock);
7139#if MTMERGESOURCE
7140    bool commandBufferComplete = false;
7141    // Verify that CB is complete (not in-flight)
7142    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7143    if (!commandBufferComplete) {
7144        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7145                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7146                            "Resetting CB %p before it has completed. You must check CB "
7147                            "flag before calling vkResetCommandBuffer().",
7148                            commandBuffer);
7149    }
7150    // Clear memory references as this point.
7151    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7152#endif
7153    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7154    VkCommandPool cmdPool = pCB->createInfo.commandPool;
7155    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7156        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7157                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7158                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
7159                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7160                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7161    }
7162    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7163        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7164                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7165                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7166                            reinterpret_cast<uint64_t>(commandBuffer));
7167    }
7168    loader_platform_thread_unlock_mutex(&globalLock);
7169    if (skipCall != VK_FALSE)
7170        return VK_ERROR_VALIDATION_FAILED_EXT;
7171    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7172    if (VK_SUCCESS == result) {
7173        loader_platform_thread_lock_mutex(&globalLock);
7174        resetCB(dev_data, commandBuffer);
7175        loader_platform_thread_unlock_mutex(&globalLock);
7176    }
7177    return result;
7178}
7179#if MTMERGESOURCE
7180// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
7181//    need to account for that mem now having binding to given commandBuffer
7182#endif
7183VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7184vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7185    VkBool32 skipCall = VK_FALSE;
7186    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7187    loader_platform_thread_lock_mutex(&globalLock);
7188    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7189    if (pCB) {
7190        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7191        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7192            skipCall |=
7193                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7194                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7195                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7196                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7197        }
7198
7199        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7200        if (pPN) {
7201            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7202            set_cb_pso_status(pCB, pPN);
7203            set_pipeline_state(pPN);
7204            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7205        } else {
7206            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7207                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7208                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7209        }
7210    }
7211    loader_platform_thread_unlock_mutex(&globalLock);
7212    if (VK_FALSE == skipCall)
7213        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7214}
7215
7216VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7217vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7218    VkBool32 skipCall = VK_FALSE;
7219    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7220    loader_platform_thread_lock_mutex(&globalLock);
7221    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7222    if (pCB) {
7223        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7224        pCB->status |= CBSTATUS_VIEWPORT_SET;
7225        pCB->viewports.resize(viewportCount);
7226        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
7227    }
7228    loader_platform_thread_unlock_mutex(&globalLock);
7229    if (VK_FALSE == skipCall)
7230        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7231}
7232
7233VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7234vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7235    VkBool32 skipCall = VK_FALSE;
7236    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7237    loader_platform_thread_lock_mutex(&globalLock);
7238    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7239    if (pCB) {
7240        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7241        pCB->status |= CBSTATUS_SCISSOR_SET;
7242        pCB->scissors.resize(scissorCount);
7243        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
7244    }
7245    loader_platform_thread_unlock_mutex(&globalLock);
7246    if (VK_FALSE == skipCall)
7247        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7248}
7249
7250VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7251    VkBool32 skipCall = VK_FALSE;
7252    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7253    loader_platform_thread_lock_mutex(&globalLock);
7254    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7255    if (pCB) {
7256        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7257        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7258    }
7259    loader_platform_thread_unlock_mutex(&globalLock);
7260    if (VK_FALSE == skipCall)
7261        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7262}
7263
7264VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7265vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7266    VkBool32 skipCall = VK_FALSE;
7267    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7268    loader_platform_thread_lock_mutex(&globalLock);
7269    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7270    if (pCB) {
7271        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7272        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7273    }
7274    loader_platform_thread_unlock_mutex(&globalLock);
7275    if (VK_FALSE == skipCall)
7276        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7277                                                         depthBiasSlopeFactor);
7278}
7279
7280VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7281    VkBool32 skipCall = VK_FALSE;
7282    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7283    loader_platform_thread_lock_mutex(&globalLock);
7284    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7285    if (pCB) {
7286        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7287        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7288    }
7289    loader_platform_thread_unlock_mutex(&globalLock);
7290    if (VK_FALSE == skipCall)
7291        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7292}
7293
7294VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7295vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7296    VkBool32 skipCall = VK_FALSE;
7297    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7298    loader_platform_thread_lock_mutex(&globalLock);
7299    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7300    if (pCB) {
7301        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7302        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7303    }
7304    loader_platform_thread_unlock_mutex(&globalLock);
7305    if (VK_FALSE == skipCall)
7306        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7307}
7308
7309VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7310vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7311    VkBool32 skipCall = VK_FALSE;
7312    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7313    loader_platform_thread_lock_mutex(&globalLock);
7314    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7315    if (pCB) {
7316        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7317        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7318    }
7319    loader_platform_thread_unlock_mutex(&globalLock);
7320    if (VK_FALSE == skipCall)
7321        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7322}
7323
7324VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7325vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7326    VkBool32 skipCall = VK_FALSE;
7327    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7328    loader_platform_thread_lock_mutex(&globalLock);
7329    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7330    if (pCB) {
7331        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7332        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7333    }
7334    loader_platform_thread_unlock_mutex(&globalLock);
7335    if (VK_FALSE == skipCall)
7336        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7337}
7338
7339VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7340vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7341    VkBool32 skipCall = VK_FALSE;
7342    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7343    loader_platform_thread_lock_mutex(&globalLock);
7344    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7345    if (pCB) {
7346        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7347        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7348    }
7349    loader_platform_thread_unlock_mutex(&globalLock);
7350    if (VK_FALSE == skipCall)
7351        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7352}
7353
7354VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7355vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7356                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7357                        const uint32_t *pDynamicOffsets) {
7358    VkBool32 skipCall = VK_FALSE;
7359    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7360    loader_platform_thread_lock_mutex(&globalLock);
7361    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7362    if (pCB) {
7363        if (pCB->state == CB_RECORDING) {
7364            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7365            uint32_t totalDynamicDescriptors = 0;
7366            string errorString = "";
7367            uint32_t lastSetIndex = firstSet + setCount - 1;
7368            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7369                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7370            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7371            for (uint32_t i = 0; i < setCount; i++) {
7372                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7373                if (pSet) {
7374                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7375                    pSet->boundCmdBuffers.insert(commandBuffer);
7376                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7377                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7378                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7379                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7380                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7381                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7382                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7383                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7384                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7385                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7386                                            "DS %#" PRIxLEAST64
7387                                            " bound but it was never updated. You may want to either update it or not bind it.",
7388                                            (uint64_t)pDescriptorSets[i]);
7389                    }
7390                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7391                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7392                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7393                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7394                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7395                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
7396                                            "pipelineLayout due to: %s",
7397                                            i, errorString.c_str());
7398                    }
7399                    if (pSet->pLayout->dynamicDescriptorCount) {
7400                        // First make sure we won't overstep bounds of pDynamicOffsets array
7401                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7402                            skipCall |=
7403                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7404                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7405                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7406                                        "descriptorSet #%u (%#" PRIxLEAST64
7407                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7408                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7409                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7410                                        (dynamicOffsetCount - totalDynamicDescriptors));
7411                        } else { // Validate and store dynamic offsets with the set
7412                            // Validate Dynamic Offset Minimums
7413                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7414                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7415                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7416                                    if (vk_safe_modulo(
7417                                            pDynamicOffsets[cur_dyn_offset],
7418                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7419                                        0) {
7420                                        skipCall |= log_msg(
7421                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7422                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7423                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7424                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7425                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7426                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7427                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7428                                    }
7429                                    cur_dyn_offset++;
7430                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7431                                    if (vk_safe_modulo(
7432                                            pDynamicOffsets[cur_dyn_offset],
7433                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7434                                        0) {
7435                                        skipCall |= log_msg(
7436                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7437                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7438                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7439                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
7440                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7441                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7442                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7443                                    }
7444                                    cur_dyn_offset++;
7445                                }
7446                            }
7447                            // Keep running total of dynamic descriptor count to verify at the end
7448                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7449                        }
7450                    }
7451                } else {
7452                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7453                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7454                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7455                                        (uint64_t)pDescriptorSets[i]);
7456                }
7457                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7458                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7459                if (firstSet > 0) { // Check set #s below the first bound set
7460                    for (uint32_t i = 0; i < firstSet; ++i) {
7461                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7462                            !verify_set_layout_compatibility(
7463                                dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7464                                errorString)) {
7465                            skipCall |= log_msg(
7466                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7467                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7468                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7469                                "DescriptorSetDS %#" PRIxLEAST64
7470                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7471                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7472                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7473                        }
7474                    }
7475                }
7476                // Check if newly last bound set invalidates any remaining bound sets
7477                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7478                    if (oldFinalBoundSet &&
7479                        !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7480                                                         errorString)) {
7481                        skipCall |=
7482                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7483                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7484                                    DRAWSTATE_NONE, "DS", "DescriptorSetDS %#" PRIxLEAST64
7485                                                          " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7486                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7487                                                          "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7488                                    (uint64_t)oldFinalBoundSet, lastSetIndex,
7489                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7490                                    lastSetIndex + 1, (uint64_t)layout);
7491                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7492                    }
7493                }
7494                // Save dynamicOffsets bound to this CB
7495                for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7496                    pCB->lastBound[pipelineBindPoint].dynamicOffsets.push_back(pDynamicOffsets[i]);
7497                }
7498            }
7499            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7500            if (totalDynamicDescriptors != dynamicOffsetCount) {
7501                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7502                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7503                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7504                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7505                                    "is %u. It should exactly match the number of dynamic descriptors.",
7506                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7507            }
7508            // Save dynamicOffsets bound to this CB
7509            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7510                pCB->dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7511            }
7512        } else {
7513            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7514        }
7515    }
7516    loader_platform_thread_unlock_mutex(&globalLock);
7517    if (VK_FALSE == skipCall)
7518        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7519                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7520}
7521
7522VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7523vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7524    VkBool32 skipCall = VK_FALSE;
7525    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7526    loader_platform_thread_lock_mutex(&globalLock);
7527#if MTMERGESOURCE
7528    VkDeviceMemory mem;
7529    skipCall =
7530        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7531    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7532    if (cb_data != dev_data->commandBufferMap.end()) {
7533        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7534        cb_data->second->validate_functions.push_back(function);
7535    }
7536    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7537#endif
7538    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7539    if (pCB) {
7540        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7541        VkDeviceSize offset_align = 0;
7542        switch (indexType) {
7543        case VK_INDEX_TYPE_UINT16:
7544            offset_align = 2;
7545            break;
7546        case VK_INDEX_TYPE_UINT32:
7547            offset_align = 4;
7548            break;
7549        default:
7550            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7551            break;
7552        }
7553        if (!offset_align || (offset % offset_align)) {
7554            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7555                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7556                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7557                                offset, string_VkIndexType(indexType));
7558        }
7559        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7560    }
7561    loader_platform_thread_unlock_mutex(&globalLock);
7562    if (VK_FALSE == skipCall)
7563        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7564}
7565
7566void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7567    uint32_t end = firstBinding + bindingCount;
7568    if (pCB->currentDrawData.buffers.size() < end) {
7569        pCB->currentDrawData.buffers.resize(end);
7570    }
7571    for (uint32_t i = 0; i < bindingCount; ++i) {
7572        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7573    }
7574}
7575
7576void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7577
7578VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7579                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7580                                                                  const VkDeviceSize *pOffsets) {
7581    VkBool32 skipCall = VK_FALSE;
7582    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7583    loader_platform_thread_lock_mutex(&globalLock);
7584#if MTMERGESOURCE
7585    for (uint32_t i = 0; i < bindingCount; ++i) {
7586        VkDeviceMemory mem;
7587        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7588                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7589        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7590        if (cb_data != dev_data->commandBufferMap.end()) {
7591            std::function<VkBool32()> function =
7592                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7593            cb_data->second->validate_functions.push_back(function);
7594        }
7595    }
7596    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7597#endif
7598    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7599    if (pCB) {
7600        addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffer()");
7601        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7602    } else {
7603        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffer()");
7604    }
7605    loader_platform_thread_unlock_mutex(&globalLock);
7606    if (VK_FALSE == skipCall)
7607        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7608}
7609
/* expects globalLock to be held by caller */
// Queue deferred callbacks that mark the backing memory of every storage image and buffer this
// command buffer updates as containing valid (written) data via set_memory_valid(). The callbacks
// are pushed onto pCB->validate_functions — presumably executed at queue submit; confirm with caller.
// Returns true if any memory-binding lookup reported an error.
bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;

    for (auto imageView : pCB->updateImages) {
        // Skip views we have no record of; nothing to mark in that case.
        auto iv_data = dev_data->imageViewMap.find(imageView);
        if (iv_data == dev_data->imageViewMap.end())
            continue;
        VkImage image = iv_data->second.image;
        VkDeviceMemory mem;
        skip_call |=
            get_mem_binding_from_object(dev_data, pCB->commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
        // NOTE: the lambda captures 'mem' (and 'image') by value, so it must be created
        // only after the binding lookup above has filled mem in.
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        VkDeviceMemory mem;
        skip_call |= get_mem_binding_from_object(dev_data, pCB->commandBuffer, (uint64_t)buffer,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
        // Buffers use the overload of set_memory_valid without an image association.
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        pCB->validate_functions.push_back(function);
    }
    return skip_call;
}
7640
7641VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7642                                                     uint32_t firstVertex, uint32_t firstInstance) {
7643    VkBool32 skipCall = VK_FALSE;
7644    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7645    loader_platform_thread_lock_mutex(&globalLock);
7646    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7647    if (pCB) {
7648        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7649        pCB->drawCount[DRAW]++;
7650        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7651        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7652        // TODO : Need to pass commandBuffer as srcObj here
7653        skipCall |=
7654            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7655                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7656        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7657        if (VK_FALSE == skipCall) {
7658            updateResourceTrackingOnDraw(pCB);
7659        }
7660        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7661    }
7662    loader_platform_thread_unlock_mutex(&globalLock);
7663    if (VK_FALSE == skipCall)
7664        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7665}
7666
7667VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7668                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7669                                                            uint32_t firstInstance) {
7670    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7671    VkBool32 skipCall = VK_FALSE;
7672    loader_platform_thread_lock_mutex(&globalLock);
7673    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7674    if (pCB) {
7675        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7676        pCB->drawCount[DRAW_INDEXED]++;
7677        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_TRUE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7678        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7679        // TODO : Need to pass commandBuffer as srcObj here
7680        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7681                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7682                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7683        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7684        if (VK_FALSE == skipCall) {
7685            updateResourceTrackingOnDraw(pCB);
7686        }
7687        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7688    }
7689    loader_platform_thread_unlock_mutex(&globalLock);
7690    if (VK_FALSE == skipCall)
7691        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7692                                                        firstInstance);
7693}
7694
7695VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7696vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7697    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7698    VkBool32 skipCall = VK_FALSE;
7699    loader_platform_thread_lock_mutex(&globalLock);
7700#if MTMERGESOURCE
7701    VkDeviceMemory mem;
7702    // MTMTODO : merge with code below
7703    skipCall =
7704        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7705    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7706#endif
7707    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7708    if (pCB) {
7709        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7710        pCB->drawCount[DRAW_INDIRECT]++;
7711        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7712        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7713        // TODO : Need to pass commandBuffer as srcObj here
7714        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7715                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7716                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7717        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7718        if (VK_FALSE == skipCall) {
7719            updateResourceTrackingOnDraw(pCB);
7720        }
7721        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7722    }
7723    loader_platform_thread_unlock_mutex(&globalLock);
7724    if (VK_FALSE == skipCall)
7725        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7726}
7727
7728VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7729vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7730    VkBool32 skipCall = VK_FALSE;
7731    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7732    loader_platform_thread_lock_mutex(&globalLock);
7733#if MTMERGESOURCE
7734    VkDeviceMemory mem;
7735    // MTMTODO : merge with code below
7736    skipCall =
7737        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7738    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7739#endif
7740    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7741    if (pCB) {
7742        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7743        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7744        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_TRUE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7745        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7746        // TODO : Need to pass commandBuffer as srcObj here
7747        skipCall |=
7748            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7749                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7750                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7751        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7752        if (VK_FALSE == skipCall) {
7753            updateResourceTrackingOnDraw(pCB);
7754        }
7755        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7756    }
7757    loader_platform_thread_unlock_mutex(&globalLock);
7758    if (VK_FALSE == skipCall)
7759        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7760}
7761
7762VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7763    VkBool32 skipCall = VK_FALSE;
7764    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7765    loader_platform_thread_lock_mutex(&globalLock);
7766    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7767    if (pCB) {
7768        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7769        // skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_COMPUTE);
7770        // TODO : Call below is temporary until call above can be re-enabled
7771        update_shader_storage_images_and_buffers(dev_data, pCB);
7772        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7773        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7774        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7775    }
7776    loader_platform_thread_unlock_mutex(&globalLock);
7777    if (VK_FALSE == skipCall)
7778        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7779}
7780
7781VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7782vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7783    VkBool32 skipCall = VK_FALSE;
7784    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7785    loader_platform_thread_lock_mutex(&globalLock);
7786#if MTMERGESOURCE
7787    VkDeviceMemory mem;
7788    skipCall =
7789        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7790    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7791#endif
7792    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7793    if (pCB) {
7794        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7795        // skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_COMPUTE);
7796        // TODO : Call below is temporary until call above can be re-enabled
7797        update_shader_storage_images_and_buffers(dev_data, pCB);
7798        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7799        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7800        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7801    }
7802    loader_platform_thread_unlock_mutex(&globalLock);
7803    if (VK_FALSE == skipCall)
7804        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7805}
7806
// Validate vkCmdCopyBuffer: track src/dst memory bindings, queue deferred memory-validity
// checks, verify buffer usage flags, record the command, then forward down the layer chain.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // NOTE: 'mem' is reused — it first holds srcBuffer's memory, then is overwritten with
    // dstBuffer's. Each lambda captures 'mem' by value, so lambda creation order matters.
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred check: the copy source's memory must already contain valid data.
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    // From here on, 'mem' refers to the destination buffer's memory.
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred action: the copy makes the destination's memory valid (written).
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
    // Validate that SRC & DST buffers have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        // Transfer commands are disallowed inside a render pass.
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
7847
7848VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7849                                 VkImageLayout srcImageLayout) {
7850    VkBool32 skip_call = VK_FALSE;
7851
7852    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7853    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7854    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7855        uint32_t layer = i + subLayers.baseArrayLayer;
7856        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7857        IMAGE_CMD_BUF_LAYOUT_NODE node;
7858        if (!FindLayout(pCB, srcImage, sub, node)) {
7859            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7860            continue;
7861        }
7862        if (node.layout != srcImageLayout) {
7863            // TODO: Improve log message in the next pass
7864            skip_call |=
7865                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7866                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
7867                                                                        "and doesn't match the current layout %s.",
7868                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7869        }
7870    }
7871    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7872        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7873            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7874            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7875                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7876                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7877        } else {
7878            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7879                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7880                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7881                                 string_VkImageLayout(srcImageLayout));
7882        }
7883    }
7884    return skip_call;
7885}
7886
7887VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7888                               VkImageLayout destImageLayout) {
7889    VkBool32 skip_call = VK_FALSE;
7890
7891    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7892    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7893    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7894        uint32_t layer = i + subLayers.baseArrayLayer;
7895        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7896        IMAGE_CMD_BUF_LAYOUT_NODE node;
7897        if (!FindLayout(pCB, destImage, sub, node)) {
7898            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7899            continue;
7900        }
7901        if (node.layout != destImageLayout) {
7902            skip_call |=
7903                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7904                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose dest layout is %s and "
7905                                                                        "doesn't match the current layout %s.",
7906                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7907        }
7908    }
7909    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7910        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7911            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7912            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7913                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7914                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7915        } else {
7916            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7917                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7918                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7919                                 string_VkImageLayout(destImageLayout));
7920        }
7921    }
7922    return skip_call;
7923}
7924
// Validation layer intercept for vkCmdCopyImage.
// Under the global lock: tracks memory references for the source and destination
// images, registers submit-time memory-validity checks, validates image usage
// flags, records the command, and verifies the per-region image layouts. The call
// is forwarded to the next layer only when no validation check flagged a skip.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Look up the memory bound to the source image
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the source image's memory must hold valid data
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    // Rebind 'mem' to the destination image's memory for the checks below
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the copy writes the destination, mark its memory valid
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
    // Validate that src & dst images have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
        // Each region's subresources must be in the layout the caller declared
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions);
}
7970
// Validation layer intercept for vkCmdBlitImage.
// Mirrors vkCmdCopyImage's memory/usage-flag validation (no per-region layout
// checks here), records the command, and forwards the call when nothing was
// flagged.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Look up the memory bound to the source image
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the blit source's memory must hold valid data
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    // Rebind 'mem' to the destination image's memory for the checks below
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the blit writes the destination, mark its memory valid
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    // Validate that src & dst images have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}
8012
// Validation layer intercept for vkCmdCopyBufferToImage.
// Tracks memory references for the source buffer and destination image, registers
// submit-time memory-validity checks, validates usage flags, records the command,
// and verifies the destination image layout per region before forwarding.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Look up the memory bound to the destination image
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the copy writes the image, mark its memory valid
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Rebind 'mem' to the source buffer's memory for the checks below
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the source buffer's memory must hold valid data
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Validate that src buff & dst image have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
        // Each region's destination subresources must be in the declared layout
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}
8057
// Validation layer intercept for vkCmdCopyImageToBuffer.
// Tracks memory references for the source image and destination buffer, registers
// submit-time memory-validity checks, validates usage flags, records the command,
// and verifies the source image layout per region before forwarding.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Look up the memory bound to the source image
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the source image's memory must hold valid data
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Rebind 'mem' to the destination buffer's memory for the checks below
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        // Deferred to queue submit: the copy writes the buffer, mark its memory valid
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Validate that dst buff & src image have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
        // Each region's source subresources must be in the declared layout
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}
8103
8104VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8105                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8106    VkBool32 skipCall = VK_FALSE;
8107    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8108    loader_platform_thread_lock_mutex(&globalLock);
8109#if MTMERGESOURCE
8110    VkDeviceMemory mem;
8111    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8112    skipCall =
8113        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8114    if (cb_data != dev_data->commandBufferMap.end()) {
8115        std::function<VkBool32()> function = [=]() {
8116            set_memory_valid(dev_data, mem, true);
8117            return VK_FALSE;
8118        };
8119        cb_data->second->validate_functions.push_back(function);
8120    }
8121    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
8122    // Validate that dst buff has correct usage flags set
8123    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8124                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8125#endif
8126    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8127    if (pCB) {
8128        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8129        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyUpdateBuffer");
8130    }
8131    loader_platform_thread_unlock_mutex(&globalLock);
8132    if (VK_FALSE == skipCall)
8133        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8134}
8135
8136VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8137vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8138    VkBool32 skipCall = VK_FALSE;
8139    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8140    loader_platform_thread_lock_mutex(&globalLock);
8141#if MTMERGESOURCE
8142    VkDeviceMemory mem;
8143    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8144    skipCall =
8145        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8146    if (cb_data != dev_data->commandBufferMap.end()) {
8147        std::function<VkBool32()> function = [=]() {
8148            set_memory_valid(dev_data, mem, true);
8149            return VK_FALSE;
8150        };
8151        cb_data->second->validate_functions.push_back(function);
8152    }
8153    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
8154    // Validate that dst buff has correct usage flags set
8155    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8156                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8157#endif
8158    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8159    if (pCB) {
8160        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8161        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyFillBuffer");
8162    }
8163    loader_platform_thread_unlock_mutex(&globalLock);
8164    if (VK_FALSE == skipCall)
8165        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8166}
8167
8168VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8169                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
8170                                                                 const VkClearRect *pRects) {
8171    VkBool32 skipCall = VK_FALSE;
8172    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8173    loader_platform_thread_lock_mutex(&globalLock);
8174    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8175    if (pCB) {
8176        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8177        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8178        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8179            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8180            // TODO : commandBuffer should be srcObj
8181            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
8182            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
8183            // call CmdClearAttachments
8184            // Otherwise this seems more like a performance warning.
8185            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8186                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8187                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8188                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8189                                (uint64_t)(commandBuffer));
8190        }
8191        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
8192    }
8193
8194    // Validate that attachment is in reference list of active subpass
8195    if (pCB->activeRenderPass) {
8196        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
8197        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8198
8199        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8200            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8201            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8202                VkBool32 found = VK_FALSE;
8203                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
8204                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
8205                        found = VK_TRUE;
8206                        break;
8207                    }
8208                }
8209                if (VK_FALSE == found) {
8210                    skipCall |= log_msg(
8211                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8212                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8213                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
8214                        attachment->colorAttachment, pCB->activeSubpass);
8215                }
8216            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8217                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8218                    (pSD->pDepthStencilAttachment->attachment ==
8219                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass
8220
8221                    skipCall |= log_msg(
8222                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8223                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8224                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
8225                        "in active subpass %d",
8226                        attachment->colorAttachment,
8227                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
8228                        pCB->activeSubpass);
8229                }
8230            }
8231        }
8232    }
8233    loader_platform_thread_unlock_mutex(&globalLock);
8234    if (VK_FALSE == skipCall)
8235        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8236}
8237
8238VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8239                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
8240                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8241    VkBool32 skipCall = VK_FALSE;
8242    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8243    loader_platform_thread_lock_mutex(&globalLock);
8244#if MTMERGESOURCE
8245    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8246    VkDeviceMemory mem;
8247    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8248    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8249    if (cb_data != dev_data->commandBufferMap.end()) {
8250        std::function<VkBool32()> function = [=]() {
8251            set_memory_valid(dev_data, mem, true, image);
8252            return VK_FALSE;
8253        };
8254        cb_data->second->validate_functions.push_back(function);
8255    }
8256    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
8257#endif
8258    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8259    if (pCB) {
8260        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8261        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
8262    }
8263    loader_platform_thread_unlock_mutex(&globalLock);
8264    if (VK_FALSE == skipCall)
8265        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8266}
8267
8268VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8269vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8270                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8271                            const VkImageSubresourceRange *pRanges) {
8272    VkBool32 skipCall = VK_FALSE;
8273    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8274    loader_platform_thread_lock_mutex(&globalLock);
8275#if MTMERGESOURCE
8276    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8277    VkDeviceMemory mem;
8278    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8279    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8280    if (cb_data != dev_data->commandBufferMap.end()) {
8281        std::function<VkBool32()> function = [=]() {
8282            set_memory_valid(dev_data, mem, true, image);
8283            return VK_FALSE;
8284        };
8285        cb_data->second->validate_functions.push_back(function);
8286    }
8287    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
8288#endif
8289    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8290    if (pCB) {
8291        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8292        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
8293    }
8294    loader_platform_thread_unlock_mutex(&globalLock);
8295    if (VK_FALSE == skipCall)
8296        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
8297                                                                   pRanges);
8298}
8299
8300VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8301vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8302                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8303    VkBool32 skipCall = VK_FALSE;
8304    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8305    loader_platform_thread_lock_mutex(&globalLock);
8306#if MTMERGESOURCE
8307    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8308    VkDeviceMemory mem;
8309    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8310    if (cb_data != dev_data->commandBufferMap.end()) {
8311        std::function<VkBool32()> function =
8312            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
8313        cb_data->second->validate_functions.push_back(function);
8314    }
8315    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8316    skipCall |=
8317        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
8318    if (cb_data != dev_data->commandBufferMap.end()) {
8319        std::function<VkBool32()> function = [=]() {
8320            set_memory_valid(dev_data, mem, true, dstImage);
8321            return VK_FALSE;
8322        };
8323        cb_data->second->validate_functions.push_back(function);
8324    }
8325    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
8326#endif
8327    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8328    if (pCB) {
8329        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8330        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
8331    }
8332    loader_platform_thread_unlock_mutex(&globalLock);
8333    if (VK_FALSE == skipCall)
8334        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8335                                                         regionCount, pRegions);
8336}
8337
8338bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8339    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8340    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8341    if (pCB) {
8342        pCB->eventToStageMap[event] = stageMask;
8343    }
8344    auto queue_data = dev_data->queueMap.find(queue);
8345    if (queue_data != dev_data->queueMap.end()) {
8346        queue_data->second.eventToStageMap[event] = stageMask;
8347    }
8348    return false;
8349}
8350
8351VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8352vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8353    VkBool32 skipCall = VK_FALSE;
8354    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8355    loader_platform_thread_lock_mutex(&globalLock);
8356    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8357    if (pCB) {
8358        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8359        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
8360        pCB->events.push_back(event);
8361        std::function<bool(VkQueue)> eventUpdate =
8362            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8363        pCB->eventUpdates.push_back(eventUpdate);
8364    }
8365    loader_platform_thread_unlock_mutex(&globalLock);
8366    if (VK_FALSE == skipCall)
8367        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
8368}
8369
8370VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8371vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8372    VkBool32 skipCall = VK_FALSE;
8373    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8374    loader_platform_thread_lock_mutex(&globalLock);
8375    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8376    if (pCB) {
8377        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8378        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8379        pCB->events.push_back(event);
8380        std::function<bool(VkQueue)> eventUpdate =
8381            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8382        pCB->eventUpdates.push_back(eventUpdate);
8383    }
8384    loader_platform_thread_unlock_mutex(&globalLock);
8385    if (VK_FALSE == skipCall)
8386        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8387}
8388
8389VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
8390    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8391    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8392    VkBool32 skip = VK_FALSE;
8393    uint32_t levelCount = 0;
8394    uint32_t layerCount = 0;
8395
8396    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8397        auto mem_barrier = &pImgMemBarriers[i];
8398        if (!mem_barrier)
8399            continue;
8400        // TODO: Do not iterate over every possibility - consolidate where
8401        // possible
8402        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8403
8404        for (uint32_t j = 0; j < levelCount; j++) {
8405            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8406            for (uint32_t k = 0; k < layerCount; k++) {
8407                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8408                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8409                IMAGE_CMD_BUF_LAYOUT_NODE node;
8410                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8411                    SetLayout(pCB, mem_barrier->image, sub,
8412                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8413                    continue;
8414                }
8415                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8416                    // TODO: Set memory invalid which is in mem_tracker currently
8417                } else if (node.layout != mem_barrier->oldLayout) {
8418                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8419                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8420                                                                                    "when current layout is %s.",
8421                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8422                }
8423                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8424            }
8425        }
8426    }
8427    return skip;
8428}
8429
8430// Print readable FlagBits in FlagMask
8431std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8432    std::string result;
8433    std::string separator;
8434
8435    if (accessMask == 0) {
8436        result = "[None]";
8437    } else {
8438        result = "[";
8439        for (auto i = 0; i < 32; i++) {
8440            if (accessMask & (1 << i)) {
8441                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
8442                separator = " | ";
8443            }
8444        }
8445        result = result + "]";
8446    }
8447    return result;
8448}
8449
8450// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8451// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8452// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8453VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8454                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
8455    VkBool32 skip_call = VK_FALSE;
8456
8457    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8458        if (accessMask & !(required_bit | optional_bits)) {
8459            // TODO: Verify against Valid Use
8460            skip_call |=
8461                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8462                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8463                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8464        }
8465    } else {
8466        if (!required_bit) {
8467            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8468                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8469                                                                  "%s when layout is %s, unless the app has previously added a "
8470                                                                  "barrier for this transition.",
8471                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8472                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8473        } else {
8474            std::string opt_bits;
8475            if (optional_bits != 0) {
8476                std::stringstream ss;
8477                ss << optional_bits;
8478                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8479            }
8480            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8481                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8482                                                                  "layout is %s, unless the app has previously added a barrier for "
8483                                                                  "this transition.",
8484                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8485                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8486        }
8487    }
8488    return skip_call;
8489}
8490
8491VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8492                                     const VkImageLayout &layout, const char *type) {
8493    VkBool32 skip_call = VK_FALSE;
8494    switch (layout) {
8495    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8496        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8497                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8498        break;
8499    }
8500    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8501        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8502                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8503        break;
8504    }
8505    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8506        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8507        break;
8508    }
8509    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8510        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8511        break;
8512    }
8513    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8514        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8515                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8516        break;
8517    }
8518    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8519        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8520                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8521        break;
8522    }
8523    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8524        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8525        break;
8526    }
8527    case VK_IMAGE_LAYOUT_UNDEFINED: {
8528        if (accessMask != 0) {
8529            // TODO: Verify against Valid Use section spec
8530            skip_call |=
8531                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8532                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8533                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8534        }
8535        break;
8536    }
8537    case VK_IMAGE_LAYOUT_GENERAL:
8538    default: { break; }
8539    }
8540    return skip_call;
8541}
8542
8543VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8544                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8545                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8546                          const VkImageMemoryBarrier *pImageMemBarriers) {
8547    VkBool32 skip_call = VK_FALSE;
8548    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8549    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8550    if (pCB->activeRenderPass && memBarrierCount) {
8551        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
8552            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8553                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8554                                                                  "with no self dependency specified.",
8555                                 funcName, pCB->activeSubpass);
8556        }
8557    }
8558    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8559        auto mem_barrier = &pImageMemBarriers[i];
8560        auto image_data = dev_data->imageMap.find(mem_barrier->image);
8561        if (image_data != dev_data->imageMap.end()) {
8562            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8563            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8564            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8565                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8566                // be VK_QUEUE_FAMILY_IGNORED
8567                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8568                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8569                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8570                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8571                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8572                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8573                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8574                }
8575            } else {
8576                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8577                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8578                // or both be a valid queue family
8579                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8580                    (src_q_f_index != dst_q_f_index)) {
8581                    skip_call |=
8582                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8583                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8584                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8585                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8586                                                                     "must be.",
8587                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8588                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8589                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
8590                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
8591                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8592                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8593                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8594                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8595                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8596                                         "queueFamilies crated for this device.",
8597                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8598                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
8599                }
8600            }
8601        }
8602
8603        if (mem_barrier) {
8604            skip_call |=
8605                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8606            skip_call |=
8607                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8608            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8609                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8610                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
8611                                                         "PREINITIALIZED.",
8612                        funcName);
8613            }
8614            auto image_data = dev_data->imageMap.find(mem_barrier->image);
8615            VkFormat format;
8616            uint32_t arrayLayers, mipLevels;
8617            bool imageFound = false;
8618            if (image_data != dev_data->imageMap.end()) {
8619                format = image_data->second.createInfo.format;
8620                arrayLayers = image_data->second.createInfo.arrayLayers;
8621                mipLevels = image_data->second.createInfo.mipLevels;
8622                imageFound = true;
8623            } else if (dev_data->device_extensions.wsi_enabled) {
8624                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
8625                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
8626                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
8627                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
8628                        format = swapchain_data->second->createInfo.imageFormat;
8629                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
8630                        mipLevels = 1;
8631                        imageFound = true;
8632                    }
8633                }
8634            }
8635            if (imageFound) {
8636                if (vk_format_is_depth_and_stencil(format) &&
8637                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
8638                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
8639                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8640                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
8641                                                             "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
8642                                                             "VK_IMAGE_ASPECT_STENCIL_BIT set.",
8643                            funcName);
8644                }
8645                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8646                                     ? 1
8647                                     : mem_barrier->subresourceRange.layerCount;
8648                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8649                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8650                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
8651                                                             "baseArrayLayer (%d) and layerCount (%d) be less "
8652                                                             "than or equal to the total number of layers (%d).",
8653                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
8654                            arrayLayers);
8655                }
8656                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8657                                     ? 1
8658                                     : mem_barrier->subresourceRange.levelCount;
8659                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8660                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8661                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
8662                                                             "(%d) and levelCount (%d) be less than or equal to "
8663                                                             "the total number of levels (%d).",
8664                            funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
8665                            mipLevels);
8666                }
8667            }
8668        }
8669    }
8670    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8671        auto mem_barrier = &pBufferMemBarriers[i];
8672        if (pCB->activeRenderPass) {
8673            skip_call |=
8674                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8675                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8676        }
8677        if (!mem_barrier)
8678            continue;
8679
8680        // Validate buffer barrier queue family indices
8681        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8682             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
8683            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8684             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
8685            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8686                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8687                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8688                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8689                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8690                                 dev_data->physDevProperties.queue_family_properties.size());
8691        }
8692
8693        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
8694        uint64_t buffer_size =
8695            buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
8696        if (buffer_data != dev_data->bufferMap.end()) {
8697            if (mem_barrier->offset >= buffer_size) {
8698                skip_call |=
8699                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8700                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
8701                                                             " whose sum is not less than total size %" PRIu64 ".",
8702                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8703                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
8704            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8705                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8706                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8707                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
8708                                     " whose sum is greater than total size %" PRIu64 ".",
8709                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8710                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
8711                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
8712            }
8713        }
8714    }
8715    return skip_call;
8716}
8717
8718bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
8719    bool skip_call = false;
8720    VkPipelineStageFlags stageMask = 0;
8721    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8722    for (uint32_t i = 0; i < eventCount; ++i) {
8723        auto event = pCB->events[firstEventIndex + i];
8724        auto queue_data = dev_data->queueMap.find(queue);
8725        if (queue_data == dev_data->queueMap.end())
8726            return false;
8727        auto event_data = queue_data->second.eventToStageMap.find(event);
8728        if (event_data != queue_data->second.eventToStageMap.end()) {
8729            stageMask |= event_data->second;
8730        } else {
8731            auto global_event_data = dev_data->eventMap.find(event);
8732            if (global_event_data == dev_data->eventMap.end()) {
8733                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8734                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8735                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8736                                     reinterpret_cast<const uint64_t &>(event));
8737            } else {
8738                stageMask |= global_event_data->second.stageMask;
8739            }
8740        }
8741    }
8742    if (sourceStageMask != stageMask) {
8743        skip_call |=
8744            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8745                    DRAWSTATE_INVALID_EVENT, "DS",
8746                    "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
8747                    "stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with vkSetEvent.",
8748                    sourceStageMask);
8749    }
8750    return skip_call;
8751}
8752
8753VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8754vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8755                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8756                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8757                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8758    VkBool32 skipCall = VK_FALSE;
8759    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8760    loader_platform_thread_lock_mutex(&globalLock);
8761    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8762    if (pCB) {
8763        auto firstEventIndex = pCB->events.size();
8764        for (uint32_t i = 0; i < eventCount; ++i) {
8765            pCB->waitedEvents.push_back(pEvents[i]);
8766            pCB->events.push_back(pEvents[i]);
8767        }
8768        std::function<bool(VkQueue)> eventUpdate =
8769            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
8770        pCB->eventUpdates.push_back(eventUpdate);
8771        if (pCB->state == CB_RECORDING) {
8772            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8773        } else {
8774            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8775        }
8776        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8777        skipCall |=
8778            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8779                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8780    }
8781    loader_platform_thread_unlock_mutex(&globalLock);
8782    if (VK_FALSE == skipCall)
8783        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8784                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8785                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8786}
8787
8788VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8789vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8790                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8791                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8792                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8793    VkBool32 skipCall = VK_FALSE;
8794    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8795    loader_platform_thread_lock_mutex(&globalLock);
8796    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8797    if (pCB) {
8798        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8799        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8800        skipCall |=
8801            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8802                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8803    }
8804    loader_platform_thread_unlock_mutex(&globalLock);
8805    if (VK_FALSE == skipCall)
8806        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8807                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8808                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8809}
8810
8811VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8812vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8813    VkBool32 skipCall = VK_FALSE;
8814    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8815    loader_platform_thread_lock_mutex(&globalLock);
8816    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8817    if (pCB) {
8818        QueryObject query = {queryPool, slot};
8819        pCB->activeQueries.insert(query);
8820        if (!pCB->startedQueries.count(query)) {
8821            pCB->startedQueries.insert(query);
8822        }
8823        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8824    }
8825    loader_platform_thread_unlock_mutex(&globalLock);
8826    if (VK_FALSE == skipCall)
8827        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8828}
8829
8830VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8831    VkBool32 skipCall = VK_FALSE;
8832    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8833    loader_platform_thread_lock_mutex(&globalLock);
8834    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8835    if (pCB) {
8836        QueryObject query = {queryPool, slot};
8837        if (!pCB->activeQueries.count(query)) {
8838            skipCall |=
8839                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8840                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8841                        (uint64_t)(queryPool), slot);
8842        } else {
8843            pCB->activeQueries.erase(query);
8844        }
8845        pCB->queryToStateMap[query] = 1;
8846        if (pCB->state == CB_RECORDING) {
8847            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "VkCmdEndQuery()");
8848        } else {
8849            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8850        }
8851    }
8852    loader_platform_thread_unlock_mutex(&globalLock);
8853    if (VK_FALSE == skipCall)
8854        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8855}
8856
8857VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8858vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8859    VkBool32 skipCall = VK_FALSE;
8860    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8861    loader_platform_thread_lock_mutex(&globalLock);
8862    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8863    if (pCB) {
8864        for (uint32_t i = 0; i < queryCount; i++) {
8865            QueryObject query = {queryPool, firstQuery + i};
8866            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8867            pCB->queryToStateMap[query] = 0;
8868        }
8869        if (pCB->state == CB_RECORDING) {
8870            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
8871        } else {
8872            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8873        }
8874        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdQueryPool");
8875    }
8876    loader_platform_thread_unlock_mutex(&globalLock);
8877    if (VK_FALSE == skipCall)
8878        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8879}
8880
8881VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8882vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8883                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8884    VkBool32 skipCall = VK_FALSE;
8885    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8886    loader_platform_thread_lock_mutex(&globalLock);
8887    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8888#if MTMERGESOURCE
8889    VkDeviceMemory mem;
8890    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8891    skipCall |=
8892        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8893    if (cb_data != dev_data->commandBufferMap.end()) {
8894        std::function<VkBool32()> function = [=]() {
8895            set_memory_valid(dev_data, mem, true);
8896            return VK_FALSE;
8897        };
8898        cb_data->second->validate_functions.push_back(function);
8899    }
8900    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8901    // Validate that DST buffer has correct usage flags set
8902    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8903                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8904#endif
8905    if (pCB) {
8906        for (uint32_t i = 0; i < queryCount; i++) {
8907            QueryObject query = {queryPool, firstQuery + i};
8908            if (!pCB->queryToStateMap[query]) {
8909                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8910                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8911                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8912                                    (uint64_t)(queryPool), firstQuery + i);
8913            }
8914        }
8915        if (pCB->state == CB_RECORDING) {
8916            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8917        } else {
8918            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8919        }
8920        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8921    }
8922    loader_platform_thread_unlock_mutex(&globalLock);
8923    if (VK_FALSE == skipCall)
8924        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8925                                                                 dstOffset, stride, flags);
8926}
8927
8928VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8929                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8930                                                              const void *pValues) {
8931    bool skipCall = false;
8932    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8933    loader_platform_thread_lock_mutex(&globalLock);
8934    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8935    if (pCB) {
8936        if (pCB->state == CB_RECORDING) {
8937            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8938        } else {
8939            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8940        }
8941    }
8942    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
8943        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
8944    }
8945    // TODO : Add warning if push constant update doesn't align with range
8946    loader_platform_thread_unlock_mutex(&globalLock);
8947    if (!skipCall)
8948        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8949}
8950
8951VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8952vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8953    VkBool32 skipCall = VK_FALSE;
8954    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8955    loader_platform_thread_lock_mutex(&globalLock);
8956    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8957    if (pCB) {
8958        QueryObject query = {queryPool, slot};
8959        pCB->queryToStateMap[query] = 1;
8960        if (pCB->state == CB_RECORDING) {
8961            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8962        } else {
8963            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8964        }
8965    }
8966    loader_platform_thread_unlock_mutex(&globalLock);
8967    if (VK_FALSE == skipCall)
8968        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8969}
8970
8971VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8972                                                                   const VkAllocationCallbacks *pAllocator,
8973                                                                   VkFramebuffer *pFramebuffer) {
8974    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8975    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8976    if (VK_SUCCESS == result) {
8977        // Shadow create info and store in map
8978        loader_platform_thread_lock_mutex(&globalLock);
8979
8980        auto & fbNode = dev_data->frameBufferMap[*pFramebuffer];
8981        fbNode.createInfo = *pCreateInfo;
8982        if (pCreateInfo->pAttachments) {
8983            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
8984            memcpy(attachments,
8985                   pCreateInfo->pAttachments,
8986                   pCreateInfo->attachmentCount * sizeof(VkImageView));
8987            fbNode.createInfo.pAttachments = attachments;
8988        }
8989        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8990            VkImageView view = pCreateInfo->pAttachments[i];
8991            auto view_data = dev_data->imageViewMap.find(view);
8992            if (view_data == dev_data->imageViewMap.end()) {
8993                continue;
8994            }
8995            MT_FB_ATTACHMENT_INFO fb_info;
8996            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8997                                        &fb_info.mem);
8998            fb_info.image = view_data->second.image;
8999            fbNode.attachments.push_back(fb_info);
9000        }
9001
9002        loader_platform_thread_unlock_mutex(&globalLock);
9003    }
9004    return result;
9005}
9006
9007VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9008                        std::unordered_set<uint32_t> &processed_nodes) {
9009    // If we have already checked this node we have not found a dependency path so return false.
9010    if (processed_nodes.count(index))
9011        return VK_FALSE;
9012    processed_nodes.insert(index);
9013    const DAGNode &node = subpass_to_node[index];
9014    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
9015    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9016        for (auto elem : node.prev) {
9017            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9018                return VK_TRUE;
9019        }
9020    } else {
9021        return VK_TRUE;
9022    }
9023    return VK_FALSE;
9024}
9025
9026VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9027                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
9028    VkBool32 result = VK_TRUE;
9029    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9030    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9031        if (subpass == dependent_subpasses[k])
9032            continue;
9033        const DAGNode &node = subpass_to_node[subpass];
9034        // Check for a specified dependency between the two nodes. If one exists we are done.
9035        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9036        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9037        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9038            // If no dependency exits an implicit dependency still might. If so, warn and if not throw an error.
9039            std::unordered_set<uint32_t> processed_nodes;
9040            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9041                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
9042                // TODO: Verify against Valid Use section of spec
9043                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9044                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9045                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
9046                                     subpass, dependent_subpasses[k]);
9047            } else {
9048                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9049                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9050                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9051                                     dependent_subpasses[k]);
9052                result = VK_FALSE;
9053            }
9054        }
9055    }
9056    return result;
9057}
9058
9059VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9060                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
9061    const DAGNode &node = subpass_to_node[index];
9062    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9063    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9064    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9065        if (attachment == subpass.pColorAttachments[j].attachment)
9066            return VK_TRUE;
9067    }
9068    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9069        if (attachment == subpass.pDepthStencilAttachment->attachment)
9070            return VK_TRUE;
9071    }
9072    VkBool32 result = VK_FALSE;
9073    // Loop through previous nodes and see if any of them write to the attachment.
9074    for (auto elem : node.prev) {
9075        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9076    }
9077    // If the attachment was written to by a previous node than this node needs to preserve it.
9078    if (result && depth > 0) {
9079        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9080        VkBool32 has_preserved = VK_FALSE;
9081        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9082            if (subpass.pPreserveAttachments[j] == attachment) {
9083                has_preserved = VK_TRUE;
9084                break;
9085            }
9086        }
9087        if (has_preserved == VK_FALSE) {
9088            skip_call |=
9089                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9090                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9091                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9092        }
9093    }
9094    return result;
9095}
9096
// Returns true when the half-open ranges [offset1, offset1 + size1) and
// [offset2, offset2 + size2) share at least one element.
// Fixed: the previous predicate missed two genuine overlaps -- identical
// ranges (offset1 == offset2, size1 == size2) and range1 fully containing
// range2 -- because it only tested whether an endpoint of range1 fell
// strictly inside range2. The canonical interval-intersection test below
// covers all cases.
template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
}
9101
9102bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9103    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9104            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9105}
9106
// Validates subpass dependencies for a render pass instance at begin time:
//  1) detects attachments that alias each other (same view, same image region,
//     or same memory range) and requires MAY_ALIAS on both;
//  2) requires an explicit dependency between any two subpasses that touch the
//     same (or an aliasing) attachment when one of them writes it;
//  3) requires read-only attachments written earlier to be preserved.
// Returns VK_TRUE when any violation was logged.
VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
                              const std::vector<DAGNode> &subpass_to_node) {
    VkBool32 skip_call = VK_FALSE;
    // Shadowed framebuffer/render-pass create infos recorded at creation time.
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
    // Per-attachment lists of subpasses that write / read it (indexed by attachment).
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            // Same view handle trivially aliases.
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_data_i = my_data->imageViewMap.find(viewi);
            auto view_data_j = my_data->imageViewMap.find(viewj);
            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
                continue;
            }
            // Different views of overlapping subresources of the same image alias.
            if (view_data_i->second.image == view_data_j->second.image &&
                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
                continue;
            }
            // Distinct images bound to overlapping ranges of the same memory alias.
            if (image_data_i->second.mem == image_data_j->second.mem &&
                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
                                   image_data_j->second.memSize)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    // Aliasing attachments must declare MAY_ALIAS on both descriptions.
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            attachment, other_attachment);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
                            other_attachment, attachment);
            }
        }
    }
    // Find for each attachment the subpasses that use them.
    // NOTE(review): input/color attachment indices are used directly to index
    // these vectors; an attachment of VK_ATTACHMENT_UNUSED (~0U) would index
    // out of bounds -- confirm callers never pass UNUSED references here.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            input_attachment_to_subpass[attachment].push_back(i);
            // A read of any aliasing attachment counts as a read of this one.
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            // The same attachment cannot be both a color and depth output here.
            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
                            attachment, i);
            }
        }
    }
    // If there is a dependency needed make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input then all subpasses that output must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it was
    // written.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
9234
9235VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9236    VkBool32 skip = VK_FALSE;
9237
9238    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9239        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9240        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9241            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
9242                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9243                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9244                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9245                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9246                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9247                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9248                } else {
9249                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9250                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9251                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9252                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
9253                }
9254            }
9255        }
9256        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9257            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
9258                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9259                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9260                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9261                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9262                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9263                } else {
9264                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9265                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9266                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9267                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
9268                }
9269            }
9270        }
9271        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9272            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
9273                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
9274                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9275                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9276                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9277                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
9278                } else {
9279                    skip |=
9280                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9281                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9282                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
9283                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9284                }
9285            }
9286        }
9287    }
9288    return skip;
9289}
9290
9291VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9292                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9293    VkBool32 skip_call = VK_FALSE;
9294    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9295        DAGNode &subpass_node = subpass_to_node[i];
9296        subpass_node.pass = i;
9297    }
9298    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9299        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9300        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9301            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9302            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9303                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9304                                 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
9305        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9306            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9307                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9308        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9309            has_self_dependency[dependency.srcSubpass] = true;
9310        }
9311        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9312            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9313        }
9314        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9315            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9316        }
9317    }
9318    return skip_call;
9319}
9320
9321
9322VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9323                                                                    const VkAllocationCallbacks *pAllocator,
9324                                                                    VkShaderModule *pShaderModule) {
9325    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9326    VkBool32 skip_call = VK_FALSE;
9327    if (!shader_is_spirv(pCreateInfo)) {
9328        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9329                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
9330    }
9331
9332    if (VK_FALSE != skip_call)
9333        return VK_ERROR_VALIDATION_FAILED_EXT;
9334
9335    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9336
9337    if (res == VK_SUCCESS) {
9338        loader_platform_thread_lock_mutex(&globalLock);
9339        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9340        loader_platform_thread_unlock_mutex(&globalLock);
9341    }
9342    return res;
9343}
9344
9345VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9346                                                                  const VkAllocationCallbacks *pAllocator,
9347                                                                  VkRenderPass *pRenderPass) {
9348    VkBool32 skip_call = VK_FALSE;
9349    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9350    loader_platform_thread_lock_mutex(&globalLock);
9351    // Create DAG
9352    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9353    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9354    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9355    // Validate
9356    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9357    if (VK_FALSE != skip_call) {
9358        loader_platform_thread_unlock_mutex(&globalLock);
9359        return VK_ERROR_VALIDATION_FAILED_EXT;
9360    }
9361    loader_platform_thread_unlock_mutex(&globalLock);
9362    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9363    if (VK_SUCCESS == result) {
9364        loader_platform_thread_lock_mutex(&globalLock);
9365        // TODOSC : Merge in tracking of renderpass from shader_checker
9366        // Shadow create info and store in map
9367        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9368        if (pCreateInfo->pAttachments) {
9369            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9370            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9371                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9372        }
9373        if (pCreateInfo->pSubpasses) {
9374            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9375            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9376
9377            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9378                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9379                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9380                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9381                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9382                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9383
9384                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9385                subpass->pInputAttachments = attachments;
9386                attachments += subpass->inputAttachmentCount;
9387
9388                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9389                subpass->pColorAttachments = attachments;
9390                attachments += subpass->colorAttachmentCount;
9391
9392                if (subpass->pResolveAttachments) {
9393                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9394                    subpass->pResolveAttachments = attachments;
9395                    attachments += subpass->colorAttachmentCount;
9396                }
9397
9398                if (subpass->pDepthStencilAttachment) {
9399                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9400                    subpass->pDepthStencilAttachment = attachments;
9401                    attachments += 1;
9402                }
9403
9404                memcpy(attachments, subpass->pPreserveAttachments, sizeof(attachments[0]) * subpass->preserveAttachmentCount);
9405                subpass->pPreserveAttachments = &attachments->attachment;
9406            }
9407        }
9408        if (pCreateInfo->pDependencies) {
9409            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9410            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9411                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9412        }
9413        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9414        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9415        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9416#if MTMERGESOURCE
9417        // MTMTODO : Merge with code from above to eliminate duplication
9418        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9419            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9420            MT_PASS_ATTACHMENT_INFO pass_info;
9421            pass_info.load_op = desc.loadOp;
9422            pass_info.store_op = desc.storeOp;
9423            pass_info.attachment = i;
9424            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9425        }
9426        // TODO: Maybe fill list and then copy instead of locking
9427        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9428        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9429            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9430        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9431            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9432            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9433                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9434                if (attachment >= pCreateInfo->attachmentCount) {
9435                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9436                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9437                                         "Color attachment %d cannot be greater than the total number of attachments %d.",
9438                                         attachment, pCreateInfo->attachmentCount);
9439                    continue;
9440                }
9441                if (attachment_first_read.count(attachment))
9442                    continue;
9443                attachment_first_read.insert(std::make_pair(attachment, false));
9444                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9445            }
9446            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9447                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9448                if (attachment >= pCreateInfo->attachmentCount) {
9449                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9450                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9451                                         "Depth stencil attachment %d cannot be greater than the total number of attachments %d.",
9452                                         attachment, pCreateInfo->attachmentCount);
9453                    continue;
9454                }
9455                if (attachment_first_read.count(attachment))
9456                    continue;
9457                attachment_first_read.insert(std::make_pair(attachment, false));
9458                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9459            }
9460            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9461                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9462                if (attachment >= pCreateInfo->attachmentCount) {
9463                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9464                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9465                                         "Input attachment %d cannot be greater than the total number of attachments %d.",
9466                                         attachment, pCreateInfo->attachmentCount);
9467                    continue;
9468                }
9469                if (attachment_first_read.count(attachment))
9470                    continue;
9471                attachment_first_read.insert(std::make_pair(attachment, true));
9472                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9473            }
9474        }
9475#endif
9476        loader_platform_thread_unlock_mutex(&globalLock);
9477    }
9478    return result;
9479}
9480// Free the renderpass shadow
9481static void deleteRenderPasses(layer_data *my_data) {
9482    if (my_data->renderPassMap.size() <= 0)
9483        return;
9484    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9485        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9486        delete[] pRenderPassInfo->pAttachments;
9487        if (pRenderPassInfo->pSubpasses) {
9488            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9489                // Attachements are all allocated in a block, so just need to
9490                //  find the first non-null one to delete
9491                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9492                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9493                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9494                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9495                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9496                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9497                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9498                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9499                }
9500            }
9501            delete[] pRenderPassInfo->pSubpasses;
9502        }
9503        delete[] pRenderPassInfo->pDependencies;
9504        delete pRenderPassInfo;
9505        delete (*ii).second;
9506    }
9507    my_data->renderPassMap.clear();
9508}
9509
// Verify that the layout the command buffer currently tracks for each
// framebuffer attachment matches the initialLayout declared by the render
// pass. Returns VK_TRUE if any mismatch (or attachment-count mismatch) was
// reported, VK_FALSE otherwise.
// NOTE(review): uses operator[] on renderPassMap/frameBufferMap, which
// default-inserts on an unknown handle -- presumably callers only pass
// previously validated handles; confirm.
VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
                                                                 "with a different number of attachments.");
    }
    // Check the tracked layout of every subresource covered by each attachment view.
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto image_data = dev_data->imageViewMap.find(image_view);
        assert(image_data != dev_data->imageViewMap.end());
        const VkImage &image = image_data->second.image;
        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    // First use of this subresource in the command buffer: seed
                    // tracking with the render pass's initialLayout and move on.
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                // Subresource already tracked: its current layout must equal the
                // render pass's declared initial layout.
                if (newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
                                                                    "where the "
                                                                    "initial layout is %s and the layout of the attachment at the "
                                                                    "start of the render pass is %s. The layouts must match.",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}
9554
9555void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
9556    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9557    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9558    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9559    if (render_pass_data == dev_data->renderPassMap.end()) {
9560        return;
9561    }
9562    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9563    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9564    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9565        return;
9566    }
9567    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9568    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9569    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9570        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9571        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9572    }
9573    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9574        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9575        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9576    }
9577    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9578        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9579        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9580    }
9581}
9582
9583VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9584    VkBool32 skip_call = VK_FALSE;
9585    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9586        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9587                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9588                             cmd_name.c_str());
9589    }
9590    return skip_call;
9591}
9592
9593void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9594    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9595    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9596    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9597    if (render_pass_data == dev_data->renderPassMap.end()) {
9598        return;
9599    }
9600    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9601    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9602    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9603        return;
9604    }
9605    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9606    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9607        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9608        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9609    }
9610}
9611
9612bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9613    bool skip_call = false;
9614    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9615    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9616        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9617        pRenderPassBegin->renderArea.offset.y < 0 ||
9618        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9619        skip_call |= static_cast<bool>(log_msg(
9620            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9621            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9622            "Cannot execute a render pass with renderArea not within the bound of the "
9623            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9624            "height %d.",
9625            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9626            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9627    }
9628    return skip_call;
9629}
9630
// Validate and record state for vkCmdBeginRenderPass, then call down the
// chain. Checks render area bounds, attachment layout consistency, subpass
// dependencies, and command-buffer level/ordering before marking the render
// pass active on the command buffer.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
// NOTE(review): this section is guarded by MTMERGE while the rest of the file
// uses MTMERGESOURCE (defined at the top of this file) -- confirm MTMERGE is
// defined somewhere, otherwise this memory-tracking block is compiled out.
#if MTMERGE
            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (pass_data != dev_data->renderPassMap.end()) {
                RENDER_PASS_NODE* pRPNode = pass_data->second;
                pRPNode->fb = pRenderPassBegin->framebuffer;
                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
                // Queue deferred (submit-time) memory-validity work based on each
                // attachment's loadOp:
                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            // CLEAR writes the attachment, so its memory becomes valid.
                            std::function<VkBool32()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                                return VK_FALSE;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                        // Clearing an attachment whose first layout is read-only is invalid.
                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
                            skipCall |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
                                        pRPNode->attachments[i].attachment, attachment_layout);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            // DONT_CARE leaves contents undefined, so mark the memory invalid.
                            std::function<VkBool32()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                                return VK_FALSE;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            // LOAD reads the existing contents, which must be valid at submit time.
                            std::function<VkBool32()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                    // Attachments whose first use is a read (e.g. input attachments)
                    // must also have valid contents at submit time.
                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<VkBool32()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                }
            }
#endif
            skipCall |= static_cast<VkBool32>(VerifyRenderAreaBounds(dev_data, pRenderPassBegin));
            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (render_pass_data != dev_data->renderPassMap.end()) {
                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
            }
            // A render pass cannot begin while another is already active.
            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            // Mark the pass active on the command buffer and remember its begin state.
            pCB->activeRenderPass = pRenderPassBegin->renderPass;
            // This is a shallow copy as that is all that is needed for now
            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
            pCB->activeSubpass = 0;
            pCB->activeSubpassContents = contents;
            pCB->framebuffer = pRenderPassBegin->framebuffer;
            // Connect this framebuffer to this cmdBuffer
            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
        } else {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
        // Re-acquire the lock to record device-wide begin state used by
        // vkCmdNextSubpass/vkCmdEndRenderPass transitions.
        loader_platform_thread_lock_mutex(&globalLock);
        // This is a shallow copy as that is all that is needed for now
        dev_data->renderPassBeginInfo = *pRenderPassBegin;
        dev_data->currentSubpass = 0;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
}
9724
// Validate and record state for vkCmdNextSubpass, then call down the chain.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    // NOTE(review): subpass layouts are transitioned twice -- once here from the
    // device-wide renderPassBeginInfo/currentSubpass and once below from the
    // per-command-buffer state. Confirm the duplication is intentional.
    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
    if (pCB) {
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
        // Re-validate the currently bound graphics pipeline against the new subpass.
        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        }
        // vkCmdNextSubpass is only valid while a render pass is active.
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
}
9747
// Validate and record state for vkCmdEndRenderPass, then call down the chain.
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
        if (pass_data != dev_data->renderPassMap.end()) {
            RENDER_PASS_NODE* pRPNode = pass_data->second;
            // Queue deferred (submit-time) memory-validity work based on each
            // attachment's storeOp:
            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        // STORE writes the attachment, so its memory becomes valid.
                        std::function<VkBool32()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                            return VK_FALSE;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        // DONT_CARE leaves contents undefined, so mark the memory invalid.
                        std::function<VkBool32()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                            return VK_FALSE;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                }
            }
        }
    }
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    // NOTE(review): final layouts are transitioned twice -- once here from the
    // device-wide renderPassBeginInfo and once below from the per-command-buffer
    // state. Confirm the duplication is intentional.
    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
    if (pCB) {
        // vkCmdEndRenderPass is only valid while a render pass is active.
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderpass");
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
        // Clear the active-pass state on the command buffer.
        pCB->activeRenderPass = 0;
        pCB->activeSubpass = 0;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}
9795
9796bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9797                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
9798    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9799                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9800                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9801                   " that is not compatible with the current render pass %" PRIx64 "."
9802                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9803                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9804                   msg);
9805}
9806
9807bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9808                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9809                                     uint32_t secondaryAttach, bool is_multi) {
9810    bool skip_call = false;
9811    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9812    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9813    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9814        primaryAttach = VK_ATTACHMENT_UNUSED;
9815    }
9816    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9817        secondaryAttach = VK_ATTACHMENT_UNUSED;
9818    }
9819    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9820        return skip_call;
9821    }
9822    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9823        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9824                                                 secondaryAttach, "The first is unused while the second is not.");
9825        return skip_call;
9826    }
9827    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9828        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9829                                                 secondaryAttach, "The second is unused while the first is not.");
9830        return skip_call;
9831    }
9832    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9833        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9834        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9835                                                 secondaryAttach, "They have different formats.");
9836    }
9837    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9838        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9839        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9840                                                 secondaryAttach, "They have different samples.");
9841    }
9842    if (is_multi &&
9843        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9844            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9845        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9846                                                 secondaryAttach, "They have different flags.");
9847    }
9848    return skip_call;
9849}
9850
9851bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9852                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
9853    bool skip_call = false;
9854    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9855    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9856    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9857    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9858    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9859    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9860        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9861        if (i < primary_desc.inputAttachmentCount) {
9862            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9863        }
9864        if (i < secondary_desc.inputAttachmentCount) {
9865            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9866        }
9867        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9868                                                     secondaryPass, secondary_input_attach, is_multi);
9869    }
9870    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9871    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9872        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9873        if (i < primary_desc.colorAttachmentCount) {
9874            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9875        }
9876        if (i < secondary_desc.colorAttachmentCount) {
9877            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9878        }
9879        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9880                                                     secondaryPass, secondary_color_attach, is_multi);
9881        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9882        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9883            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9884        }
9885        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9886            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9887        }
9888        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9889                                                     secondaryPass, secondary_resolve_attach, is_multi);
9890    }
9891    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9892    if (primary_desc.pDepthStencilAttachment) {
9893        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9894    }
9895    if (secondary_desc.pDepthStencilAttachment) {
9896        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9897    }
9898    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9899                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9900    return skip_call;
9901}
9902
9903bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9904                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9905    bool skip_call = false;
9906    // Early exit if renderPass objects are identical (and therefore compatible)
9907    if (primaryPass == secondaryPass)
9908        return skip_call;
9909    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9910    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9911    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9912        skip_call |=
9913            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9914                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9915                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9916                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9917        return skip_call;
9918    }
9919    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9920        skip_call |=
9921            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9922                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9923                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9924                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9925        return skip_call;
9926    }
9927    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9928        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9929                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9930                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9931                             " that is not compatible with the current render pass %" PRIx64 "."
9932                             "They have a different number of subpasses.",
9933                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9934        return skip_call;
9935    }
9936    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9937    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9938        skip_call |=
9939            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9940    }
9941    return skip_call;
9942}
9943
9944bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9945                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9946    bool skip_call = false;
9947    if (!pSubCB->beginInfo.pInheritanceInfo) {
9948        return skip_call;
9949    }
9950    VkFramebuffer primary_fb = pCB->framebuffer;
9951    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9952    if (secondary_fb != VK_NULL_HANDLE) {
9953        if (primary_fb != secondary_fb) {
9954            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9955                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9956                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
9957                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
9958                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9959        }
9960        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
9961        if (fb_data == dev_data->frameBufferMap.end()) {
9962            skip_call |=
9963                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9964                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9965                                                                          "which has invalid framebuffer %" PRIx64 ".",
9966                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9967            return skip_call;
9968        }
9969        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
9970                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9971    }
9972    return skip_call;
9973}
9974
9975bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9976    bool skipCall = false;
9977    unordered_set<int> activeTypes;
9978    for (auto queryObject : pCB->activeQueries) {
9979        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9980        if (queryPoolData != dev_data->queryPoolMap.end()) {
9981            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9982                pSubCB->beginInfo.pInheritanceInfo) {
9983                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9984                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9985                    skipCall |= log_msg(
9986                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9987                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9988                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9989                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics is being queried so the command "
9990                        "buffer must have all bits set on the queryPool.",
9991                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9992                }
9993            }
9994            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9995        }
9996    }
9997    for (auto queryObject : pSubCB->startedQueries) {
9998        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9999        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10000            skipCall |=
10001                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10002                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10003                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
10004                        "which has invalid active query pool %" PRIx64 "of type %d but a query of that type has been started on "
10005                        "secondary Cmd Buffer %p.",
10006                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10007                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10008        }
10009    }
10010    return skipCall;
10011}
10012
10013VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10014vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10015    VkBool32 skipCall = VK_FALSE;
10016    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10017    loader_platform_thread_lock_mutex(&globalLock);
10018    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10019    if (pCB) {
10020        GLOBAL_CB_NODE *pSubCB = NULL;
10021        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10022            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10023            if (!pSubCB) {
10024                skipCall |=
10025                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10026                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10027                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
10028                            (void *)pCommandBuffers[i], i);
10029            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10030                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10031                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10032                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
10033                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
10034                                    (void *)pCommandBuffers[i], i);
10035            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10036                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10037                    skipCall |= log_msg(
10038                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10039                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10040                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
10041                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10042                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
10043                } else {
10044                    // Make sure render pass is compatible with parent command buffer pass if has continue
10045                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
10046                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
10047                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10048                }
10049                string errorString = "";
10050                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
10051                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
10052                    skipCall |= log_msg(
10053                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10054                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10055                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
10056                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
10057                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10058                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
10059                }
10060                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
10061                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
10062                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
10063                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
10064                        skipCall |= log_msg(
10065                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10066                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10067                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
10068                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
10069                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
10070                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
10071                    }
10072                }
10073            }
10074            // TODO(mlentine): Move more logic into this method
10075            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10076            skipCall |= validateCommandBufferState(dev_data, pSubCB);
10077            // Secondary cmdBuffers are considered pending execution starting w/
10078            // being recorded
10079            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10080                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10081                    skipCall |= log_msg(
10082                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10083                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10084                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10085                        "set!",
10086                        (uint64_t)(pCB->commandBuffer));
10087                }
10088                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10089                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10090                    skipCall |= log_msg(
10091                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10092                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10093                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
10094                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10095                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10096                                          "set, even though it does.",
10097                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10098                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10099                }
10100            }
10101            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
10102                skipCall |=
10103                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10104                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10105                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10106                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
10107                            "flight and inherited queries not "
10108                            "supported on this device.",
10109                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10110            }
10111            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10112            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10113            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10114        }
10115        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteComands");
10116        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteComands()");
10117    }
10118    loader_platform_thread_unlock_mutex(&globalLock);
10119    if (VK_FALSE == skipCall)
10120        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10121}
10122
10123VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
10124    VkBool32 skip_call = VK_FALSE;
10125    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10126    auto mem_data = dev_data->memObjMap.find(mem);
10127    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
10128        std::vector<VkImageLayout> layouts;
10129        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
10130            for (auto layout : layouts) {
10131                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10132                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10133                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10134                                                                                         "GENERAL or PREINITIALIZED are supported.",
10135                                         string_VkImageLayout(layout));
10136                }
10137            }
10138        }
10139    }
10140    return skip_call;
10141}
10142
// vkMapMemory: validate that the memory object may be host-mapped, then forward the call down
// the dispatch chain and (on success) set up shadow tracking of the mapped range.
// Checks: memory type is HOST_VISIBLE, requested range is valid (validateMemRange), and any
// bound image is in a mappable layout (ValidateMapImageLayouts). Any failure suppresses the
// downstream call and returns VK_ERROR_VALIDATION_FAILED_EXT.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkBool32 skip_call = VK_FALSE;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
    if (pMemObj) {
        // Mapping implies the app intends to write; mark contents as valid for later checks.
        pMemObj->valid = true;
        // `memProps` is declared outside this view — presumably the cached physical-device
        // memory properties; TODO confirm where it is populated.
        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call =
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
    }
    // Record the requested range so Unmap/Flush/Invalidate can be validated against it.
    skip_call |= validateMemRange(dev_data, mem, offset, size);
    storeMemRanges(dev_data, mem, offset, size);
#endif
    skip_call |= ValidateMapImageLayouts(device, mem);
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call) {
        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
#if MTMERGESOURCE
        // Must re-acquire the lock: the downstream call above runs unlocked.
        loader_platform_thread_lock_mutex(&globalLock);
        initializeAndTrackMemory(dev_data, mem, size, ppData);
        loader_platform_thread_unlock_mutex(&globalLock);
#endif
    }
    return result;
}
10177
10178#if MTMERGESOURCE
10179VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
10180    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10181    VkBool32 skipCall = VK_FALSE;
10182
10183    loader_platform_thread_lock_mutex(&globalLock);
10184    skipCall |= deleteMemRanges(my_data, mem);
10185    loader_platform_thread_unlock_mutex(&globalLock);
10186    if (VK_FALSE == skipCall) {
10187        my_data->device_dispatch_table->UnmapMemory(device, mem);
10188    }
10189}
10190
10191VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10192                                const VkMappedMemoryRange *pMemRanges) {
10193    VkBool32 skipCall = VK_FALSE;
10194    for (uint32_t i = 0; i < memRangeCount; ++i) {
10195        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10196        if (mem_element != my_data->memObjMap.end()) {
10197            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
10198                skipCall |= log_msg(
10199                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10200                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10201                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10202                    "(" PRINTF_SIZE_T_SPECIFIER ").",
10203                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
10204            }
10205            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
10206                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
10207                 (pMemRanges[i].offset + pMemRanges[i].size))) {
10208                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10209                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10210                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10211                                                                 ") exceeds the Memory Object's upper-bound "
10212                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
10213                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10214                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
10215            }
10216        }
10217    }
10218    return skipCall;
10219}
10220
10221VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10222                                                  const VkMappedMemoryRange *pMemRanges) {
10223    VkBool32 skipCall = VK_FALSE;
10224    for (uint32_t i = 0; i < memRangeCount; ++i) {
10225        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
10226        if (mem_element != my_data->memObjMap.end()) {
10227            if (mem_element->second.pData) {
10228                VkDeviceSize size = mem_element->second.memRange.size;
10229                VkDeviceSize half_size = (size / 2);
10230                char *data = static_cast<char *>(mem_element->second.pData);
10231                for (auto j = 0; j < half_size; ++j) {
10232                    if (data[j] != NoncoherentMemoryFillValue) {
10233                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10234                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10235                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10236                                            (uint64_t)pMemRanges[i].memory);
10237                    }
10238                }
10239                for (auto j = size + half_size; j < 2 * size; ++j) {
10240                    if (data[j] != NoncoherentMemoryFillValue) {
10241                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10242                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10243                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
10244                                            (uint64_t)pMemRanges[i].memory);
10245                    }
10246                }
10247                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
10248            }
10249        }
10250    }
10251    return skipCall;
10252}
10253
10254VK_LAYER_EXPORT VkResult VKAPI_CALL
10255vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10256    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10257    VkBool32 skipCall = VK_FALSE;
10258    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10259
10260    loader_platform_thread_lock_mutex(&globalLock);
10261    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10262    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10263    loader_platform_thread_unlock_mutex(&globalLock);
10264    if (VK_FALSE == skipCall) {
10265        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10266    }
10267    return result;
10268}
10269
10270VK_LAYER_EXPORT VkResult VKAPI_CALL
10271vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10272    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10273    VkBool32 skipCall = VK_FALSE;
10274    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10275
10276    loader_platform_thread_lock_mutex(&globalLock);
10277    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10278    loader_platform_thread_unlock_mutex(&globalLock);
10279    if (VK_FALSE == skipCall) {
10280        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10281    }
10282    return result;
10283}
10284#endif
10285
10286VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10287    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10288    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10289    VkBool32 skipCall = VK_FALSE;
10290#if MTMERGESOURCE
10291    loader_platform_thread_lock_mutex(&globalLock);
10292    // Track objects tied to memory
10293    uint64_t image_handle = (uint64_t)(image);
10294    skipCall =
10295        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10296    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
10297    {
10298        VkMemoryRequirements memRequirements;
10299        vkGetImageMemoryRequirements(device, image, &memRequirements);
10300        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
10301                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
10302                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
10303    }
10304    print_mem_list(dev_data, device);
10305    loader_platform_thread_unlock_mutex(&globalLock);
10306#endif
10307    if (VK_FALSE == skipCall) {
10308        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10309        VkMemoryRequirements memRequirements;
10310        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10311        loader_platform_thread_lock_mutex(&globalLock);
10312        dev_data->memObjMap[mem].image = image;
10313        dev_data->imageMap[image].mem = mem;
10314        dev_data->imageMap[image].memOffset = memoryOffset;
10315        dev_data->imageMap[image].memSize = memRequirements.size;
10316        loader_platform_thread_unlock_mutex(&globalLock);
10317    }
10318    return result;
10319}
10320
10321VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
10322    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10323    loader_platform_thread_lock_mutex(&globalLock);
10324    dev_data->eventMap[event].needsSignaled = false;
10325    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10326    loader_platform_thread_unlock_mutex(&globalLock);
10327    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
10328    return result;
10329}
10330
10331VKAPI_ATTR VkResult VKAPI_CALL
10332vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10333    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10334    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10335    VkBool32 skip_call = VK_FALSE;
10336#if MTMERGESOURCE
10337    //MTMTODO : Merge this code with the checks below
10338    loader_platform_thread_lock_mutex(&globalLock);
10339
10340    for (uint32_t i = 0; i < bindInfoCount; i++) {
10341        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
10342        // Track objects tied to memory
10343        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
10344            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
10345                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
10346                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10347                                           "vkQueueBindSparse"))
10348                    skip_call = VK_TRUE;
10349            }
10350        }
10351        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
10352            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
10353                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
10354                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10355                                           "vkQueueBindSparse"))
10356                    skip_call = VK_TRUE;
10357            }
10358        }
10359        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
10360            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
10361                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
10362                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10363                                           "vkQueueBindSparse"))
10364                    skip_call = VK_TRUE;
10365            }
10366        }
10367        // Validate semaphore state
10368        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
10369            VkSemaphore sem = bindInfo->pWaitSemaphores[i];
10370
10371            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10372                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
10373                    skip_call =
10374                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10375                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10376                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
10377                }
10378                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
10379            }
10380        }
10381        for (uint32_t i = 0; i < bindInfo->signalSemaphoreCount; i++) {
10382            VkSemaphore sem = bindInfo->pSignalSemaphores[i];
10383
10384            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10385                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10386                    skip_call =
10387                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10388                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10389                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
10390                }
10391                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10392            }
10393        }
10394    }
10395
10396    print_mem_list(dev_data, queue);
10397    loader_platform_thread_unlock_mutex(&globalLock);
10398#endif
10399    loader_platform_thread_lock_mutex(&globalLock);
10400    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10401        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10402        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10403            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
10404                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
10405            } else {
10406                skip_call |=
10407                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10408                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10409                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10410                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
10411            }
10412        }
10413        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10414            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
10415        }
10416    }
10417    loader_platform_thread_unlock_mutex(&globalLock);
10418
10419    if (VK_FALSE == skip_call)
10420        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10421#if MTMERGESOURCE
10422    // Update semaphore state
10423    loader_platform_thread_lock_mutex(&globalLock);
10424    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
10425        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
10426        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
10427            VkSemaphore sem = bindInfo->pWaitSemaphores[i];
10428
10429            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10430                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10431            }
10432        }
10433    }
10434    loader_platform_thread_unlock_mutex(&globalLock);
10435#endif
10436
10437    return result;
10438}
10439
10440VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10441                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10442    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10443    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10444    if (result == VK_SUCCESS) {
10445        loader_platform_thread_lock_mutex(&globalLock);
10446        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10447        sNode->signaled = 0;
10448        sNode->queue = VK_NULL_HANDLE;
10449        sNode->in_use.store(0);
10450        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10451        loader_platform_thread_unlock_mutex(&globalLock);
10452    }
10453    return result;
10454}
10455
10456VKAPI_ATTR VkResult VKAPI_CALL
10457vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10458    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10459    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10460    if (result == VK_SUCCESS) {
10461        loader_platform_thread_lock_mutex(&globalLock);
10462        dev_data->eventMap[*pEvent].needsSignaled = false;
10463        dev_data->eventMap[*pEvent].in_use.store(0);
10464        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10465        loader_platform_thread_unlock_mutex(&globalLock);
10466    }
10467    return result;
10468}
10469
10470VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10471                                                                    const VkAllocationCallbacks *pAllocator,
10472                                                                    VkSwapchainKHR *pSwapchain) {
10473    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10474    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10475
10476    if (VK_SUCCESS == result) {
10477        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10478        loader_platform_thread_lock_mutex(&globalLock);
10479        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10480        loader_platform_thread_unlock_mutex(&globalLock);
10481    }
10482
10483    return result;
10484}
10485
10486VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10487vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10488    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10489    bool skipCall = false;
10490
10491    loader_platform_thread_lock_mutex(&globalLock);
10492    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10493    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10494        if (swapchain_data->second->images.size() > 0) {
10495            for (auto swapchain_image : swapchain_data->second->images) {
10496                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10497                if (image_sub != dev_data->imageSubresourceMap.end()) {
10498                    for (auto imgsubpair : image_sub->second) {
10499                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10500                        if (image_item != dev_data->imageLayoutMap.end()) {
10501                            dev_data->imageLayoutMap.erase(image_item);
10502                        }
10503                    }
10504                    dev_data->imageSubresourceMap.erase(image_sub);
10505                }
10506#if MTMERGESOURCE
10507                skipCall = clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
10508                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10509                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
10510#endif
10511            }
10512        }
10513        delete swapchain_data->second;
10514        dev_data->device_extensions.swapchainMap.erase(swapchain);
10515    }
10516    loader_platform_thread_unlock_mutex(&globalLock);
10517    if (!skipCall)
10518        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10519}
10520
10521VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10522vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10523    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10524    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10525
10526    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10527        // This should never happen and is checked by param checker.
10528        if (!pCount)
10529            return result;
10530        loader_platform_thread_lock_mutex(&globalLock);
10531        const size_t count = *pCount;
10532        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10533        if (!swapchain_node->images.empty()) {
10534            // TODO : Not sure I like the memcmp here, but it works
10535            const bool mismatch = (swapchain_node->images.size() != count ||
10536                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10537            if (mismatch) {
10538                // TODO: Verify against Valid Usage section of extension
10539                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10540                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10541                        "vkGetSwapchainInfoKHR(%" PRIu64
10542                        ", VK_SWAP_CHAIN_INFO_TYPE_PERSISTENT_IMAGES_KHR) returned mismatching data",
10543                        (uint64_t)(swapchain));
10544            }
10545        }
10546        for (uint32_t i = 0; i < *pCount; ++i) {
10547            IMAGE_LAYOUT_NODE image_layout_node;
10548            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10549            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10550            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
10551            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10552            swapchain_node->images.push_back(pSwapchainImages[i]);
10553            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10554            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10555            dev_data->imageLayoutMap[subpair] = image_layout_node;
10556            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10557        }
10558        if (!swapchain_node->images.empty()) {
10559            for (auto image : swapchain_node->images) {
10560                // Add image object binding, then insert the new Mem Object and then bind it to created image
10561#if MTMERGESOURCE
10562                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10563                                       &swapchain_node->createInfo);
10564#endif
10565            }
10566        }
10567        loader_platform_thread_unlock_mutex(&globalLock);
10568    }
10569    return result;
10570}
10571
10572VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10573    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10574    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10575    bool skip_call = false;
10576
10577    if (pPresentInfo) {
10578        loader_platform_thread_lock_mutex(&globalLock);
10579        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10580            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
10581                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
10582            } else {
10583                skip_call |=
10584                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
10585                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10586                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10587                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
10588            }
10589        }
10590        VkDeviceMemory mem;
10591        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10592            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
10593            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
10594                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
10595                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
10596#if MTMERGESOURCE
10597                skip_call |=
10598                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10599                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10600#endif
10601                vector<VkImageLayout> layouts;
10602                if (FindLayouts(dev_data, image, layouts)) {
10603                    for (auto layout : layouts) {
10604                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10605                            skip_call |=
10606                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10607                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10608                                        "Images passed to present must be in layout "
10609                                        "PRESENT_SOURCE_KHR but is in %s",
10610                                        string_VkImageLayout(layout));
10611                        }
10612                    }
10613                }
10614            }
10615        }
10616        loader_platform_thread_unlock_mutex(&globalLock);
10617    }
10618
10619    if (!skip_call)
10620        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10621#if MTMERGESOURCE
10622    loader_platform_thread_lock_mutex(&globalLock);
10623    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
10624        VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
10625        if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
10626            dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
10627        }
10628    }
10629    loader_platform_thread_unlock_mutex(&globalLock);
10630#endif
10631    return result;
10632}
10633
10634VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10635                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10636    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10637    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10638    bool skipCall = false;
10639#if MTMERGESOURCE
10640    loader_platform_thread_lock_mutex(&globalLock);
10641    if (semaphore != VK_NULL_HANDLE &&
10642        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10643        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10644            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10645                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10646                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10647        }
10648        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10649        dev_data->semaphoreMap[semaphore].in_use.fetch_add(1);
10650    }
10651    auto fence_data = dev_data->fenceMap.find(fence);
10652    if (fence_data != dev_data->fenceMap.end()) {
10653        fence_data->second.swapchain = swapchain;
10654    }
10655    loader_platform_thread_unlock_mutex(&globalLock);
10656#endif
10657    if (!skipCall) {
10658        result =
10659            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10660    }
10661    loader_platform_thread_lock_mutex(&globalLock);
10662    // FIXME/TODO: Need to add some thing code the "fence" parameter
10663    dev_data->semaphoreMap[semaphore].signaled = 1;
10664    loader_platform_thread_unlock_mutex(&globalLock);
10665    return result;
10666}
10667
10668VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10669vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10670                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10671    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10672    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10673    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10674    if (VK_SUCCESS == res) {
10675        loader_platform_thread_lock_mutex(&globalLock);
10676        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10677        loader_platform_thread_unlock_mutex(&globalLock);
10678    }
10679    return res;
10680}
10681
10682VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
10683                                                                           VkDebugReportCallbackEXT msgCallback,
10684                                                                           const VkAllocationCallbacks *pAllocator) {
10685    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10686    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10687    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10688    loader_platform_thread_lock_mutex(&globalLock);
10689    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10690    loader_platform_thread_unlock_mutex(&globalLock);
10691}
10692
10693VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10694vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10695                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10696    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10697    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10698                                                            pMsg);
10699}
10700
10701VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10702    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10703        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10704    if (!strcmp(funcName, "vkDestroyDevice"))
10705        return (PFN_vkVoidFunction)vkDestroyDevice;
10706    if (!strcmp(funcName, "vkQueueSubmit"))
10707        return (PFN_vkVoidFunction)vkQueueSubmit;
10708    if (!strcmp(funcName, "vkWaitForFences"))
10709        return (PFN_vkVoidFunction)vkWaitForFences;
10710    if (!strcmp(funcName, "vkGetFenceStatus"))
10711        return (PFN_vkVoidFunction)vkGetFenceStatus;
10712    if (!strcmp(funcName, "vkQueueWaitIdle"))
10713        return (PFN_vkVoidFunction)vkQueueWaitIdle;
10714    if (!strcmp(funcName, "vkDeviceWaitIdle"))
10715        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
10716    if (!strcmp(funcName, "vkGetDeviceQueue"))
10717        return (PFN_vkVoidFunction)vkGetDeviceQueue;
10718    if (!strcmp(funcName, "vkDestroyInstance"))
10719        return (PFN_vkVoidFunction)vkDestroyInstance;
10720    if (!strcmp(funcName, "vkDestroyDevice"))
10721        return (PFN_vkVoidFunction)vkDestroyDevice;
10722    if (!strcmp(funcName, "vkDestroyFence"))
10723        return (PFN_vkVoidFunction)vkDestroyFence;
10724    if (!strcmp(funcName, "vkResetFences"))
10725        return (PFN_vkVoidFunction)vkResetFences;
10726    if (!strcmp(funcName, "vkDestroySemaphore"))
10727        return (PFN_vkVoidFunction)vkDestroySemaphore;
10728    if (!strcmp(funcName, "vkDestroyEvent"))
10729        return (PFN_vkVoidFunction)vkDestroyEvent;
10730    if (!strcmp(funcName, "vkDestroyQueryPool"))
10731        return (PFN_vkVoidFunction)vkDestroyQueryPool;
10732    if (!strcmp(funcName, "vkDestroyBuffer"))
10733        return (PFN_vkVoidFunction)vkDestroyBuffer;
10734    if (!strcmp(funcName, "vkDestroyBufferView"))
10735        return (PFN_vkVoidFunction)vkDestroyBufferView;
10736    if (!strcmp(funcName, "vkDestroyImage"))
10737        return (PFN_vkVoidFunction)vkDestroyImage;
10738    if (!strcmp(funcName, "vkDestroyImageView"))
10739        return (PFN_vkVoidFunction)vkDestroyImageView;
10740    if (!strcmp(funcName, "vkDestroyShaderModule"))
10741        return (PFN_vkVoidFunction)vkDestroyShaderModule;
10742    if (!strcmp(funcName, "vkDestroyPipeline"))
10743        return (PFN_vkVoidFunction)vkDestroyPipeline;
10744    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
10745        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
10746    if (!strcmp(funcName, "vkDestroySampler"))
10747        return (PFN_vkVoidFunction)vkDestroySampler;
10748    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
10749        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
10750    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
10751        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
10752    if (!strcmp(funcName, "vkDestroyFramebuffer"))
10753        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
10754    if (!strcmp(funcName, "vkDestroyRenderPass"))
10755        return (PFN_vkVoidFunction)vkDestroyRenderPass;
10756    if (!strcmp(funcName, "vkCreateBuffer"))
10757        return (PFN_vkVoidFunction)vkCreateBuffer;
10758    if (!strcmp(funcName, "vkCreateBufferView"))
10759        return (PFN_vkVoidFunction)vkCreateBufferView;
10760    if (!strcmp(funcName, "vkCreateImage"))
10761        return (PFN_vkVoidFunction)vkCreateImage;
10762    if (!strcmp(funcName, "vkCreateImageView"))
10763        return (PFN_vkVoidFunction)vkCreateImageView;
10764    if (!strcmp(funcName, "vkCreateFence"))
10765        return (PFN_vkVoidFunction)vkCreateFence;
10766    if (!strcmp(funcName, "CreatePipelineCache"))
10767        return (PFN_vkVoidFunction)vkCreatePipelineCache;
10768    if (!strcmp(funcName, "DestroyPipelineCache"))
10769        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
10770    if (!strcmp(funcName, "GetPipelineCacheData"))
10771        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
10772    if (!strcmp(funcName, "MergePipelineCaches"))
10773        return (PFN_vkVoidFunction)vkMergePipelineCaches;
10774    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
10775        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
10776    if (!strcmp(funcName, "vkCreateComputePipelines"))
10777        return (PFN_vkVoidFunction)vkCreateComputePipelines;
10778    if (!strcmp(funcName, "vkCreateSampler"))
10779        return (PFN_vkVoidFunction)vkCreateSampler;
10780    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
10781        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
10782    if (!strcmp(funcName, "vkCreatePipelineLayout"))
10783        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
10784    if (!strcmp(funcName, "vkCreateDescriptorPool"))
10785        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
10786    if (!strcmp(funcName, "vkResetDescriptorPool"))
10787        return (PFN_vkVoidFunction)vkResetDescriptorPool;
10788    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
10789        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
10790    if (!strcmp(funcName, "vkFreeDescriptorSets"))
10791        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
10792    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
10793        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
10794    if (!strcmp(funcName, "vkCreateCommandPool"))
10795        return (PFN_vkVoidFunction)vkCreateCommandPool;
10796    if (!strcmp(funcName, "vkDestroyCommandPool"))
10797        return (PFN_vkVoidFunction)vkDestroyCommandPool;
10798    if (!strcmp(funcName, "vkResetCommandPool"))
10799        return (PFN_vkVoidFunction)vkResetCommandPool;
10800    if (!strcmp(funcName, "vkCreateQueryPool"))
10801        return (PFN_vkVoidFunction)vkCreateQueryPool;
10802    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
10803        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
10804    if (!strcmp(funcName, "vkFreeCommandBuffers"))
10805        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
10806    if (!strcmp(funcName, "vkBeginCommandBuffer"))
10807        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
10808    if (!strcmp(funcName, "vkEndCommandBuffer"))
10809        return (PFN_vkVoidFunction)vkEndCommandBuffer;
10810    if (!strcmp(funcName, "vkResetCommandBuffer"))
10811        return (PFN_vkVoidFunction)vkResetCommandBuffer;
10812    if (!strcmp(funcName, "vkCmdBindPipeline"))
10813        return (PFN_vkVoidFunction)vkCmdBindPipeline;
10814    if (!strcmp(funcName, "vkCmdSetViewport"))
10815        return (PFN_vkVoidFunction)vkCmdSetViewport;
10816    if (!strcmp(funcName, "vkCmdSetScissor"))
10817        return (PFN_vkVoidFunction)vkCmdSetScissor;
10818    if (!strcmp(funcName, "vkCmdSetLineWidth"))
10819        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
10820    if (!strcmp(funcName, "vkCmdSetDepthBias"))
10821        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
10822    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
10823        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
10824    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
10825        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
10826    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
10827        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
10828    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
10829        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
10830    if (!strcmp(funcName, "vkCmdSetStencilReference"))
10831        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
10832    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
10833        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
10834    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
10835        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
10836    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
10837        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
10838    if (!strcmp(funcName, "vkCmdDraw"))
10839        return (PFN_vkVoidFunction)vkCmdDraw;
10840    if (!strcmp(funcName, "vkCmdDrawIndexed"))
10841        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
10842    if (!strcmp(funcName, "vkCmdDrawIndirect"))
10843        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
10844    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
10845        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
10846    if (!strcmp(funcName, "vkCmdDispatch"))
10847        return (PFN_vkVoidFunction)vkCmdDispatch;
10848    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
10849        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
10850    if (!strcmp(funcName, "vkCmdCopyBuffer"))
10851        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
10852    if (!strcmp(funcName, "vkCmdCopyImage"))
10853        return (PFN_vkVoidFunction)vkCmdCopyImage;
10854    if (!strcmp(funcName, "vkCmdBlitImage"))
10855        return (PFN_vkVoidFunction)vkCmdBlitImage;
10856    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
10857        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
10858    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
10859        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
10860    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
10861        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
10862    if (!strcmp(funcName, "vkCmdFillBuffer"))
10863        return (PFN_vkVoidFunction)vkCmdFillBuffer;
10864    if (!strcmp(funcName, "vkCmdClearColorImage"))
10865        return (PFN_vkVoidFunction)vkCmdClearColorImage;
10866    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
10867        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
10868    if (!strcmp(funcName, "vkCmdClearAttachments"))
10869        return (PFN_vkVoidFunction)vkCmdClearAttachments;
10870    if (!strcmp(funcName, "vkCmdResolveImage"))
10871        return (PFN_vkVoidFunction)vkCmdResolveImage;
10872    if (!strcmp(funcName, "vkCmdSetEvent"))
10873        return (PFN_vkVoidFunction)vkCmdSetEvent;
10874    if (!strcmp(funcName, "vkCmdResetEvent"))
10875        return (PFN_vkVoidFunction)vkCmdResetEvent;
10876    if (!strcmp(funcName, "vkCmdWaitEvents"))
10877        return (PFN_vkVoidFunction)vkCmdWaitEvents;
10878    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
10879        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
10880    if (!strcmp(funcName, "vkCmdBeginQuery"))
10881        return (PFN_vkVoidFunction)vkCmdBeginQuery;
10882    if (!strcmp(funcName, "vkCmdEndQuery"))
10883        return (PFN_vkVoidFunction)vkCmdEndQuery;
10884    if (!strcmp(funcName, "vkCmdResetQueryPool"))
10885        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
10886    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
10887        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
10888    if (!strcmp(funcName, "vkCmdPushConstants"))
10889        return (PFN_vkVoidFunction)vkCmdPushConstants;
10890    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
10891        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
10892    if (!strcmp(funcName, "vkCreateFramebuffer"))
10893        return (PFN_vkVoidFunction)vkCreateFramebuffer;
10894    if (!strcmp(funcName, "vkCreateShaderModule"))
10895        return (PFN_vkVoidFunction)vkCreateShaderModule;
10896    if (!strcmp(funcName, "vkCreateRenderPass"))
10897        return (PFN_vkVoidFunction)vkCreateRenderPass;
10898    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
10899        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
10900    if (!strcmp(funcName, "vkCmdNextSubpass"))
10901        return (PFN_vkVoidFunction)vkCmdNextSubpass;
10902    if (!strcmp(funcName, "vkCmdEndRenderPass"))
10903        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
10904    if (!strcmp(funcName, "vkCmdExecuteCommands"))
10905        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
10906    if (!strcmp(funcName, "vkSetEvent"))
10907        return (PFN_vkVoidFunction)vkSetEvent;
10908    if (!strcmp(funcName, "vkMapMemory"))
10909        return (PFN_vkVoidFunction)vkMapMemory;
10910#if MTMERGESOURCE
10911    if (!strcmp(funcName, "vkUnmapMemory"))
10912        return (PFN_vkVoidFunction)vkUnmapMemory;
10913    if (!strcmp(funcName, "vkAllocateMemory"))
10914        return (PFN_vkVoidFunction)vkAllocateMemory;
10915    if (!strcmp(funcName, "vkFreeMemory"))
10916        return (PFN_vkVoidFunction)vkFreeMemory;
10917    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
10918        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
10919    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
10920        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
10921    if (!strcmp(funcName, "vkBindBufferMemory"))
10922        return (PFN_vkVoidFunction)vkBindBufferMemory;
10923    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
10924        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
10925    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
10926        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
10927#endif
10928    if (!strcmp(funcName, "vkGetQueryPoolResults"))
10929        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
10930    if (!strcmp(funcName, "vkBindImageMemory"))
10931        return (PFN_vkVoidFunction)vkBindImageMemory;
10932    if (!strcmp(funcName, "vkQueueBindSparse"))
10933        return (PFN_vkVoidFunction)vkQueueBindSparse;
10934    if (!strcmp(funcName, "vkCreateSemaphore"))
10935        return (PFN_vkVoidFunction)vkCreateSemaphore;
10936    if (!strcmp(funcName, "vkCreateEvent"))
10937        return (PFN_vkVoidFunction)vkCreateEvent;
10938
10939    if (dev == NULL)
10940        return NULL;
10941
10942    layer_data *dev_data;
10943    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10944
10945    if (dev_data->device_extensions.wsi_enabled) {
10946        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
10947            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
10948        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
10949            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
10950        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
10951            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
10952        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
10953            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
10954        if (!strcmp(funcName, "vkQueuePresentKHR"))
10955            return (PFN_vkVoidFunction)vkQueuePresentKHR;
10956    }
10957
10958    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
10959    {
10960        if (pTable->GetDeviceProcAddr == NULL)
10961            return NULL;
10962        return pTable->GetDeviceProcAddr(dev, funcName);
10963    }
10964}
10965
10966VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10967    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
10968        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
10969    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10970        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10971    if (!strcmp(funcName, "vkCreateInstance"))
10972        return (PFN_vkVoidFunction)vkCreateInstance;
10973    if (!strcmp(funcName, "vkCreateDevice"))
10974        return (PFN_vkVoidFunction)vkCreateDevice;
10975    if (!strcmp(funcName, "vkDestroyInstance"))
10976        return (PFN_vkVoidFunction)vkDestroyInstance;
10977#if MTMERGESOURCE
10978    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
10979        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
10980#endif
10981    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
10982        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
10983    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
10984        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
10985    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
10986        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
10987    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
10988        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
10989
10990    if (instance == NULL)
10991        return NULL;
10992
10993    PFN_vkVoidFunction fptr;
10994
10995    layer_data *my_data;
10996    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10997    fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
10998    if (fptr)
10999        return fptr;
11000
11001    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
11002    if (pTable->GetInstanceProcAddr == NULL)
11003        return NULL;
11004    return pTable->GetInstanceProcAddr(instance, funcName);
11005}
11006