core_validation.cpp revision e9fc72c9b6850b2769d7100bb4e9cffde395f05b
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <string>
#include <iostream>
#include <algorithm>
#include <list>
#include <vector>
#include <memory>
#include <SPIRV/spirv.hpp>
#include <set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_config.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_logging.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;
// std names used unqualified throughout this file
using std::list;
using std::string;
using std::unique_ptr;
using std::vector;

#if MTMERGESOURCE
// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
#endif
// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    VkBool32 wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
#if MTMERGESOURCE
// MTMERGESOURCE - stuff pulled directly from MT
    uint64_t currentFenceId;
    // Maps for tracking key structs related to mem_tracker state
    // Images and Buffers are 2 objects that can have memory bound to them so they get special treatment
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> imageBindingMap;
    unordered_map<uint64_t, MT_OBJ_BINDING_INFO> bufferBindingMap;
// MTMERGESOURCE - End of MT stuff
#endif
    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, LAYOUT_NODE *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    // Current render pass
    VkRenderPassBeginInfo renderPassBeginInfo;
    uint32_t currentSubpass;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE physDevProperties;
// MTMERGESOURCE - added a couple of fields to constructor initializer
    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
#if MTMERGESOURCE
        currentFenceId(1),
#endif
        device_extensions() {}
};

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
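/* Usage sketch (illustrative; do_something() is a placeholder): the first word
 * of every SPIR-V instruction packs its word count in the high 16 bits and its
 * opcode in the low 16, which is all operator++ needs to hop to the next
 * instruction:
 *
 *     for (auto insn : *module) {             // via shader_module::begin()/end()
 *         if (insn.opcode() == spv::OpName)
 *             do_something(insn.word(1));     // words 1..len-1 are the operands
 *     }
 */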

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
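    /* A SPIR-V module opens with a five-word header (magic number, version,
     * generator, id bound, schema), so the first instruction starts at word 5. */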
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

// TODO : This can be much smarter, using separate locks for separate global data
static int globalLockInitialized = 0;
static loader_platform_thread_mutex globalLock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkPhysicalDeviceMemoryProperties memProps;

static void clear_cmd_buf_and_mem_references(layer_data *my_data, const VkCommandBuffer cb);

#define MAX_BINDING 0xFFFFFFFF

static MT_OBJ_BINDING_INFO *get_object_binding_info(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    MT_OBJ_BINDING_INFO *retValue = NULL;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageBindingMap.find(handle);
        if (it != my_data->imageBindingMap.end())
            return &(*it).second;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferBindingMap.find(handle);
        if (it != my_data->bufferBindingMap.end())
            return &(*it).second;
        break;
    }
    default:
        break;
    }
    return retValue;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
static void delete_queue_info_list(layer_data *my_data) {
    // Process queue list, cleaning up each entry before deleting
    my_data->queueMap.clear();
}

// Delete CBInfo from container and clear mem references to CB
static void delete_cmd_buf_info(layer_data *my_data, VkCommandPool commandPool, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(my_data, cb);
    // Delete the CBInfo info
    my_data->commandPoolMap[commandPool].commandBuffers.remove(cb);
    my_data->commandBufferMap.erase(cb);
}

static void add_object_binding_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                    const VkDeviceMemory mem) {
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        pCI->mem = mem;
        break;
    }
    default:
        break;
    }
}

static void add_object_create_info(layer_data *my_data, const uint64_t handle, const VkDebugReportObjectTypeEXT type,
                                   const void *pCreateInfo) {
    // TODO : For any CreateInfo struct that has ptrs, need to deep copy them and appropriately clean up on Destroy
    switch (type) {
    // Buffers and images are unique as their CreateInfo is in container struct
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto pCI = &my_data->bufferBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.buffer, pCreateInfo, sizeof(VkBufferCreateInfo));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        memcpy(&pCI->create_info.image, pCreateInfo, sizeof(VkImageCreateInfo));
        break;
    }
    // Swapchains are a special case: they share my_data->imageBindingMap, but we copy in the
    // VkSwapchainCreateInfoKHR's image usage flags and set the mem value to a unique key. This key
    // is used by vkCreateImageView and internal mem_tracker routines to distinguish swapchain
    // images from ordinary images.
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT: {
        auto pCI = &my_data->imageBindingMap[handle];
        memset(pCI, 0, sizeof(MT_OBJ_BINDING_INFO));
        pCI->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
        pCI->valid = false;
        pCI->create_info.image.usage = static_cast<const VkSwapchainCreateInfoKHR *>(pCreateInfo)->imageUsage;
        break;
    }
    default:
        break;
    }
}

// Record a fence (if one was provided) in our list of fences/fenceIds
static VkBool32 add_fence_info(layer_data *my_data, VkFence fence, VkQueue queue, uint64_t *fenceId) {
    VkBool32 skipCall = VK_FALSE;
    *fenceId = my_data->currentFenceId++;

    // If a fence was provided, record its id and owning queue, and validate its state
    if (fence != VK_NULL_HANDLE) {
        my_data->fenceMap[fence].fenceId = *fenceId;
        my_data->fenceMap[fence].queue = queue;
        // Validate that fence is in UNSIGNALED state
        VkFenceCreateInfo *pFenceCI = &(my_data->fenceMap[fence].createInfo);
        if (pFenceCI->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                               (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                               "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
                               (uint64_t)fence);
        }
    } else {
        // TODO : Do we need to create an internal fence here for tracking purposes?
    }
    // Update most recently submitted fence and fenceId for Queue
    my_data->queueMap[queue].lastSubmittedId = *fenceId;
    return skipCall;
}
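/* Note on the id scheme used above: currentFenceId increases monotonically,
 * and each queue records its most recently submitted fence id (lastSubmittedId)
 * as well as the newest id known to have completed (lastRetiredId). A command
 * buffer whose fenceId is <= the queue's lastRetiredId is therefore known to
 * have finished execution (see checkCBCompleted() below). */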

// Remove a fenceInfo from our list of fences/fenceIds
static void delete_fence_info(layer_data *my_data, VkFence fence) { my_data->fenceMap.erase(fence); }

// Record information when a fence is known to be signalled
static void update_fence_tracking(layer_data *my_data, VkFence fence) {
    auto fence_item = my_data->fenceMap.find(fence);
    if (fence_item != my_data->fenceMap.end()) {
        FENCE_NODE *pCurFenceInfo = &(*fence_item).second;
        VkQueue queue = pCurFenceInfo->queue;
        auto queue_item = my_data->queueMap.find(queue);
        if (queue_item != my_data->queueMap.end()) {
            QUEUE_NODE *pQueueInfo = &(*queue_item).second;
            if (pQueueInfo->lastRetiredId < pCurFenceInfo->fenceId) {
                pQueueInfo->lastRetiredId = pCurFenceInfo->fenceId;
            }
        }
    }

    // Update fence state in fenceCreateInfo structure
    auto pFCI = &(my_data->fenceMap[fence].createInfo);
    pFCI->flags = static_cast<VkFenceCreateFlags>(pFCI->flags | VK_FENCE_CREATE_SIGNALED_BIT);
}

// Helper routine that updates the fence list for a specific queue to all-retired
static void retire_queue_fences(layer_data *my_data, VkQueue queue) {
    QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
    // Set queue's lastRetired to lastSubmitted indicating all fences completed
    pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
}

// Helper routine that updates all queues to all-retired
static void retire_device_fences(layer_data *my_data, VkDevice device) {
    // Process each queue for device
    // TODO: Add multiple device support
    for (auto ii = my_data->queueMap.begin(); ii != my_data->queueMap.end(); ++ii) {
        // Set queue's lastRetired to lastSubmitted indicating all fences completed
        QUEUE_NODE *pQueueInfo = &(*ii).second;
        pQueueInfo->lastRetiredId = pQueueInfo->lastSubmittedId;
    }
}

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static VkBool32 validate_usage_flags(layer_data *my_data, void *disp_obj, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    VkBool32 correct_usage = VK_FALSE;
    VkBool32 skipCall = VK_FALSE;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}
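/* Example (illustrative): to check a buffer used as a copy source, one would
 * pass desired = VK_BUFFER_USAGE_TRANSFER_SRC_BIT with strict = VK_TRUE, so the
 * creation-time usage must include the transfer-src bit; with strict = VK_FALSE
 * any overlap between the actual and desired flags is accepted. */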

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_image_usage_flags(layer_data *my_data, void *disp_obj, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.image.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static VkBool32 validate_buffer_usage_flags(layer_data *my_data, void *disp_obj, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pBindInfo = get_object_binding_info(my_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    if (pBindInfo) {
        skipCall = validate_usage_flags(my_data, disp_obj, pBindInfo->create_info.buffer.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].refCount = 0;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static VkBool32 validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                         VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo && !pBindInfo->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        MT_OBJ_BINDING_INFO *pBindInfo =
            get_object_binding_info(dev_data, reinterpret_cast<const uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        if (pBindInfo) {
            pBindInfo->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}
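/* Together, validate_memory_is_valid() and set_memory_valid() implement a simple
 * "has this allocation ever been written?" bit: callers that write memory
 * (copies, clears, fills, rendering) are expected to mark it valid, while reads
 * of never-filled memory are flagged above. (The call sites are outside this
 * excerpt.) */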

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static VkBool32 update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                                  const char *apiName) {
    VkBool32 skipCall = VK_FALSE;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            // Search for cmd buffer object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pMemInfo->pCommandBufferBindings.size() > 0) {
                for (list<VkCommandBuffer>::iterator it = pMemInfo->pCommandBufferBindings.begin();
                     it != pMemInfo->pCommandBufferBindings.end(); ++it) {
                    if ((*it) == cb) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                pMemInfo->pCommandBufferBindings.push_front(cb);
                pMemInfo->refCount++;
            }
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                // Search for memory object in cmd buffer's reference list
                VkBool32 found = VK_FALSE;
                if (pCBNode->pMemObjList.size() > 0) {
                    for (auto it = pCBNode->pMemObjList.begin(); it != pCBNode->pMemObjList.end(); ++it) {
                        if ((*it) == mem) {
                            found = VK_TRUE;
                            break;
                        }
                    }
                }
                // If not present, add to list
                if (found == VK_FALSE) {
                    pCBNode->pMemObjList.push_front(mem);
                }
            }
        }
    }
    return skipCall;
}

// Free bindings related to CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);

    if (pCBNode) {
        if (pCBNode->pMemObjList.size() > 0) {
            list<VkDeviceMemory> mem_obj_list = pCBNode->pMemObjList;
            for (list<VkDeviceMemory>::iterator it = mem_obj_list.begin(); it != mem_obj_list.end(); ++it) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, *it);
                if (pInfo) {
                    pInfo->pCommandBufferBindings.remove(cb);
                    pInfo->refCount--;
                }
            }
            pCBNode->pMemObjList.clear();
        }
        pCBNode->validate_functions.clear();
    }
}

// Delete the entire CB list
static void delete_cmd_buf_info_list(layer_data *my_data) {
    for (auto &cb_node : my_data->commandBufferMap) {
        clear_cmd_buf_and_mem_references(my_data, cb_node.first);
    }
    my_data->commandBufferMap.clear();
}

// For given MemObjInfo, report Obj & CB bindings
static VkBool32 reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    VkBool32 skipCall = VK_FALSE;
    size_t cmdBufRefCount = pMemObjInfo->pCommandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->pObjBindings.size();

    if ((pMemObjInfo->pCommandBufferBindings.size()) != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0 && pMemObjInfo->pCommandBufferBindings.size() > 0) {
        for (list<VkCommandBuffer>::const_iterator it = pMemObjInfo->pCommandBufferBindings.begin();
             it != pMemObjInfo->pCommandBufferBindings.end(); ++it) {
            // TODO : CommandBuffer should be source Obj here
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)(*it), __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, (*it), (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pCommandBufferBindings.clear();
    }

    if (objRefCount > 0 && pMemObjInfo->pObjBindings.size() > 0) {
        for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, it->type, it->handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    it->handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->pObjBindings.clear();
    }
    return skipCall;
}

static VkBool32 deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

// Check if fence for given CB is completed
static bool checkCBCompleted(layer_data *my_data, const VkCommandBuffer cb, bool *complete) {
    GLOBAL_CB_NODE *pCBNode = getCBNode(my_data, cb);
    bool skipCall = false;
    *complete = true;

    if (pCBNode) {
        if (pCBNode->lastSubmittedQueue != NULL) {
            VkQueue queue = pCBNode->lastSubmittedQueue;
            QUEUE_NODE *pQueueInfo = &my_data->queueMap[queue];
            if (pCBNode->fenceId > pQueueInfo->lastRetiredId) {
                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                   VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)cb, __LINE__, MEMTRACK_NONE, "MEM",
                                   "fence %#" PRIxLEAST64 " for CB %p has not been checked for completion",
                                   (uint64_t)pCBNode->lastSubmittedFence, cb);
                *complete = false;
            }
        }
    }
    return skipCall;
}

static VkBool32 freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, VkBool32 internal) {
    VkBool32 skipCall = VK_FALSE;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            bool commandBufferComplete = false;
            assert(pInfo->object != VK_NULL_HANDLE);
            list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
            list<VkCommandBuffer>::iterator temp;
            while (pInfo->pCommandBufferBindings.size() > 0 && it != pInfo->pCommandBufferBindings.end()) {
                skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
                if (commandBufferComplete) {
                    temp = it;
                    ++temp;
                    clear_cmd_buf_and_mem_references(dev_data, *it);
                    it = temp;
                } else {
                    ++it;
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (0 != pInfo->refCount) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs 3 tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Decrement refCount for MemObjInfo
// 3. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static VkBool32 clear_object_binding(layer_data *dev_data, void *dispObj, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    VkBool32 skipCall = VK_FALSE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
    if (pObjBindInfo) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, pObjBindInfo->mem);
        // TODO : Make sure this is a reasonable way to reset mem binding
        pObjBindInfo->mem = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object from that
            // memory object's list, decrement the memObj's refCount, and set the object's memory
            // binding pointer to NULL.
            VkBool32 clearSucceeded = VK_FALSE;
            for (auto it = pMemObjInfo->pObjBindings.begin(); it != pMemObjInfo->pObjBindings.end(); ++it) {
                if ((it->handle == handle) && (it->type == type)) {
                    pMemObjInfo->refCount--;
                    pMemObjInfo->pObjBindings.erase(it);
                    clearSucceeded = VK_TRUE;
                    break;
                }
            }
            if (VK_FALSE == clearSucceeded) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}
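/* Sketch of the bookkeeping maintained by the helpers in this section: each
 * image/buffer's MT_OBJ_BINDING_INFO.mem points at the VkDeviceMemory it is
 * bound to, while that memory's DEVICE_MEM_INFO keeps the reverse links --
 * pObjBindings (bound objects) and pCommandBufferBindings (command buffers
 * referencing it) -- with refCount counting both kinds of reference. */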

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
//  device is required for error logging, need a dispatchable
//  object for that.
static VkBool32 set_mem_binding(layer_data *dev_data, void *dispatch_object, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list()",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                // TODO : Need to track mem binding for obj and report conflict here
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, pObjBindInfo->mem);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    MT_OBJ_HANDLE_TYPE oht;
                    oht.handle = handle;
                    oht.type = type;
                    pMemInfo->pObjBindings.push_front(oht);
                    pMemInfo->refCount++;
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        VkImageCreateInfo ici = pObjBindInfo->create_info.image;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                    pObjBindInfo->mem = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
static VkBool32 set_sparse_mem_binding(layer_data *dev_data, void *dispObject, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    VkBool32 skipCall = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, dispObject, handle, type);
    } else {
        MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(dev_data, handle, type);
        if (!pObjBindInfo) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list()", apiName, handle);
        }
        // non-null case so should have real mem obj
        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
        if (pInfo) {
            // Search for object in memory object's binding list
            VkBool32 found = VK_FALSE;
            if (pInfo->pObjBindings.size() > 0) {
                for (auto it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                    if (((*it).handle == handle) && ((*it).type == type)) {
                        found = VK_TRUE;
                        break;
                    }
                }
            }
            // If not present, add to list
            if (found == VK_FALSE) {
                MT_OBJ_HANDLE_TYPE oht;
                oht.handle = handle;
                oht.type = type;
                pInfo->pObjBindings.push_front(oht);
                pInfo->refCount++;
            }
            // Need to set mem binding for this object (guard against the untracked-object case reported above)
            if (pObjBindInfo) {
                pObjBindInfo->mem = mem;
            }
        }
    }
    return skipCall;
}

template <typename T>
void print_object_map_members(layer_data *my_data, void *dispObj, T const &objectName, VkDebugReportObjectTypeEXT objectType,
                              const char *objectStr) {
    for (auto const &element : objectName) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objectType, 0, __LINE__, MEMTRACK_NONE, "MEM",
                "    %s Object list contains %s Object %#" PRIxLEAST64 " ", objectStr, objectStr, element.first);
    }
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static VkBool32 get_mem_binding_from_object(layer_data *my_data, void *dispObj, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    VkBool32 skipCall = VK_FALSE;
    *mem = VK_NULL_HANDLE;
    MT_OBJ_BINDING_INFO *pObjBindInfo = get_object_binding_info(my_data, handle, type);
    if (pObjBindInfo) {
        if (pObjBindInfo->mem) {
            *mem = pObjBindInfo->mem;
        } else {
            skipCall =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but object has no mem binding", handle);
        }
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data, void *dispObj) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: %u", pInfo->refCount);
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->pObjBindings.size());
        if (pInfo->pObjBindings.size() > 0) {
            for (list<MT_OBJ_HANDLE_TYPE>::iterator it = pInfo->pObjBindings.begin(); it != pInfo->pObjBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, it->handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->pCommandBufferBindings.size());
        if (pInfo->pCommandBufferBindings.size() > 0) {
            for (list<VkCommandBuffer>::iterator it = pInfo->pCommandBufferBindings.begin();
                 it != pInfo->pCommandBufferBindings.end(); ++it) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", (*it));
            }
        }
    }
}

static void printCBList(layer_data *my_data, void *dispObj) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p, fenceId %" PRIx64 ", and fence %#" PRIxLEAST64,
                (void *)pCBInfo, (void *)pCBInfo->commandBuffer, pCBInfo->fenceId, (uint64_t)pCBInfo->lastSubmittedFence);

        if (pCBInfo->pMemObjList.size() <= 0)
            continue;
        for (list<VkDeviceMemory>::iterator it = pCBInfo->pMemObjList.begin(); it != pCBInfo->pMemObjList.end(); ++it) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)(*it));
        }
    }
}

#endif

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
1079    case CMD_CLEARDEPTHSTENCILIMAGE:
1080        return "CMD_CLEARDEPTHSTENCILIMAGE";
1081    case CMD_RESOLVEIMAGE:
1082        return "CMD_RESOLVEIMAGE";
1083    case CMD_SETEVENT:
1084        return "CMD_SETEVENT";
1085    case CMD_RESETEVENT:
1086        return "CMD_RESETEVENT";
1087    case CMD_WAITEVENTS:
1088        return "CMD_WAITEVENTS";
1089    case CMD_PIPELINEBARRIER:
1090        return "CMD_PIPELINEBARRIER";
1091    case CMD_BEGINQUERY:
1092        return "CMD_BEGINQUERY";
1093    case CMD_ENDQUERY:
1094        return "CMD_ENDQUERY";
1095    case CMD_RESETQUERYPOOL:
1096        return "CMD_RESETQUERYPOOL";
1097    case CMD_COPYQUERYPOOLRESULTS:
1098        return "CMD_COPYQUERYPOOLRESULTS";
1099    case CMD_WRITETIMESTAMP:
1100        return "CMD_WRITETIMESTAMP";
1101    case CMD_INITATOMICCOUNTERS:
1102        return "CMD_INITATOMICCOUNTERS";
1103    case CMD_LOADATOMICCOUNTERS:
1104        return "CMD_LOADATOMICCOUNTERS";
1105    case CMD_SAVEATOMICCOUNTERS:
1106        return "CMD_SAVEATOMICCOUNTERS";
1107    case CMD_BEGINRENDERPASS:
1108        return "CMD_BEGINRENDERPASS";
1109    case CMD_ENDRENDERPASS:
1110        return "CMD_ENDRENDERPASS";
1111    default:
1112        return "UNKNOWN";
1113    }
1114}
1115
1116// SPIRV utility functions
1117static void build_def_index(shader_module *module) {
1118    for (auto insn : *module) {
1119        switch (insn.opcode()) {
1120        /* Types */
1121        case spv::OpTypeVoid:
1122        case spv::OpTypeBool:
1123        case spv::OpTypeInt:
1124        case spv::OpTypeFloat:
1125        case spv::OpTypeVector:
1126        case spv::OpTypeMatrix:
1127        case spv::OpTypeImage:
1128        case spv::OpTypeSampler:
1129        case spv::OpTypeSampledImage:
1130        case spv::OpTypeArray:
1131        case spv::OpTypeRuntimeArray:
1132        case spv::OpTypeStruct:
1133        case spv::OpTypeOpaque:
1134        case spv::OpTypePointer:
1135        case spv::OpTypeFunction:
1136        case spv::OpTypeEvent:
1137        case spv::OpTypeDeviceEvent:
1138        case spv::OpTypeReserveId:
1139        case spv::OpTypeQueue:
1140        case spv::OpTypePipe:
1141            module->def_index[insn.word(1)] = insn.offset();
1142            break;
1143
1144        /* Fixed constants */
1145        case spv::OpConstantTrue:
1146        case spv::OpConstantFalse:
1147        case spv::OpConstant:
1148        case spv::OpConstantComposite:
1149        case spv::OpConstantSampler:
1150        case spv::OpConstantNull:
1151            module->def_index[insn.word(2)] = insn.offset();
1152            break;
1153
1154        /* Specialization constants */
1155        case spv::OpSpecConstantTrue:
1156        case spv::OpSpecConstantFalse:
1157        case spv::OpSpecConstant:
1158        case spv::OpSpecConstantComposite:
1159        case spv::OpSpecConstantOp:
1160            module->def_index[insn.word(2)] = insn.offset();
1161            break;
1162
1163        /* Variables */
1164        case spv::OpVariable:
1165            module->def_index[insn.word(2)] = insn.offset();
1166            break;
1167
1168        /* Functions */
1169        case spv::OpFunction:
1170            module->def_index[insn.word(2)] = insn.offset();
1171            break;
1172
1173        default:
1174            /* We don't care about any other defs for now. */
1175            break;
1176        }
1177    }
1178}
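/* Note the asymmetry above: OpType* instructions carry their result id in word 1,
 * while constants, variables and functions carry a result *type* id in word 1 and
 * their result id in word 2 -- hence the different word() indices recorded. */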
1179
1180static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1181    for (auto insn : *src) {
1182        if (insn.opcode() == spv::OpEntryPoint) {
1183            auto entrypointName = (char const *)&insn.word(3);
1184            auto entrypointStageBits = 1u << insn.word(1);
1185
1186            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1187                return insn;
1188            }
1189        }
1190    }
1191
1192    return src->end();
1193}
1194
1195bool shader_is_spirv(VkShaderModuleCreateInfo const *pCreateInfo) {
1196    uint32_t const *words = (uint32_t const *)pCreateInfo->pCode;
1197    size_t sizeInWords = pCreateInfo->codeSize / sizeof(uint32_t);
1198
1199    /* Just validate that the header makes sense. */
1200    return sizeInWords >= 5 && words[0] == spv::MagicNumber && words[1] == spv::Version;
1201}
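/* For reference, the five words validated above are the SPIR-V header:
 *     word 0: magic number (0x07230203)
 *     word 1: version
 *     word 2: generator magic
 *     word 3: bound (upper limit on result ids)
 *     word 4: schema (reserved, zero)
 * A module shorter than five words cannot even contain a complete header. */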
1202
1203static char const *storage_class_name(unsigned sc) {
1204    switch (sc) {
1205    case spv::StorageClassInput:
1206        return "input";
1207    case spv::StorageClassOutput:
1208        return "output";
1209    case spv::StorageClassUniformConstant:
1210        return "const uniform";
1211    case spv::StorageClassUniform:
1212        return "uniform";
1213    case spv::StorageClassWorkgroup:
1214        return "workgroup local";
1215    case spv::StorageClassCrossWorkgroup:
1216        return "workgroup global";
1217    case spv::StorageClassPrivate:
1218        return "private global";
1219    case spv::StorageClassFunction:
1220        return "function";
1221    case spv::StorageClassGeneric:
1222        return "generic";
1223    case spv::StorageClassAtomicCounter:
1224        return "atomic counter";
1225    case spv::StorageClassImage:
1226        return "image";
1227    case spv::StorageClassPushConstant:
1228        return "push constant";
1229    default:
1230        return "unknown";
1231    }
1232}
1233
1234/* get the value of an integral constant */
1235unsigned get_constant_value(shader_module const *src, unsigned id) {
1236    auto value = src->get_def(id);
1237    assert(value != src->end());
1238
1239    if (value.opcode() != spv::OpConstant) {
1240        /* TODO: Either ensure that the specialization transform is already performed on a module we're
1241            considering here, OR -- specialize on the fly now.
1242            */
1243        return 1;
1244    }
1245
1246    return value.word(3);
1247}
1248
1249
1250static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1251    auto insn = src->get_def(type);
1252    assert(insn != src->end());
1253
1254    switch (insn.opcode()) {
1255    case spv::OpTypeBool:
1256        ss << "bool";
1257        break;
1258    case spv::OpTypeInt:
1259        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1260        break;
1261    case spv::OpTypeFloat:
1262        ss << "float" << insn.word(2);
1263        break;
1264    case spv::OpTypeVector:
1265        ss << "vec" << insn.word(3) << " of ";
1266        describe_type_inner(ss, src, insn.word(2));
1267        break;
1268    case spv::OpTypeMatrix:
1269        ss << "mat" << insn.word(3) << " of ";
1270        describe_type_inner(ss, src, insn.word(2));
1271        break;
1272    case spv::OpTypeArray:
1273        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1274        describe_type_inner(ss, src, insn.word(2));
1275        break;
1276    case spv::OpTypePointer:
1277        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1278        describe_type_inner(ss, src, insn.word(3));
1279        break;
1280    case spv::OpTypeStruct: {
1281        ss << "struct of (";
1282        for (unsigned i = 2; i < insn.len(); i++) {
1283            describe_type_inner(ss, src, insn.word(i));
1284            if (i != insn.len() - 1) {
1285                ss << ", ";
1286            }
1287        }
1288        /* close outside the loop so a zero-member struct still prints ")" */
1289        ss << ")";
1290        break;
1291    }
1292    case spv::OpTypeSampler:
1293        ss << "sampler";
1294        break;
1295    case spv::OpTypeSampledImage:
1296        ss << "sampler+";
1297        describe_type_inner(ss, src, insn.word(2));
1298        break;
1299    case spv::OpTypeImage:
1300        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1301        break;
1302    default:
1303        ss << "oddtype";
1304        break;
1305    }
1306}
1307
1308
1309static std::string describe_type(shader_module const *src, unsigned type) {
1310    std::ostringstream ss;
1311    describe_type_inner(ss, src, type);
1312    return ss.str();
1313}
1314
1315
1316static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool b_arrayed) {
1317    /* walk two type trees together, and complain about differences */
1318    auto a_insn = a->get_def(a_type);
1319    auto b_insn = b->get_def(b_type);
1320    assert(a_insn != a->end());
1321    assert(b_insn != b->end());
1322
1323    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1324        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1325        return types_match(a, b, a_type, b_insn.word(2), false);
1326    }
1327
1328    if (a_insn.opcode() != b_insn.opcode()) {
1329        return false;
1330    }
1331
1332    switch (a_insn.opcode()) {
1333    /* if b_arrayed and we hit a leaf type, then we can't match -- there's nowhere for the extra OpTypeArray to be! */
1334    case spv::OpTypeBool:
1335        return !b_arrayed;
1336    case spv::OpTypeInt:
1337        /* match on width, signedness */
1338        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3) && !b_arrayed;
1339    case spv::OpTypeFloat:
1340        /* match on width */
1341        return a_insn.word(2) == b_insn.word(2) && !b_arrayed;
1342    case spv::OpTypeVector:
1343    case spv::OpTypeMatrix:
1344        /* match on element type, count. these all have the same layout. we don't get here if
1345         * b_arrayed -- that is handled above. */
1346        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) && a_insn.word(3) == b_insn.word(3);
1347    case spv::OpTypeArray:
1348        /* match on element type, count. these all have the same layout. we don't get here if
1349         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1350         * not a literal within OpTypeArray */
1351        return !b_arrayed && types_match(a, b, a_insn.word(2), b_insn.word(2), b_arrayed) &&
1352               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1353    case spv::OpTypeStruct:
1354        /* match on all element types */
1355        {
1356            if (b_arrayed) {
1357                /* for the purposes of matching different levels of arrayness, structs are leaves. */
1358                return false;
1359            }
1360
1361            if (a_insn.len() != b_insn.len()) {
1362                return false; /* structs cannot match if member counts differ */
1363            }
1364
1365            for (unsigned i = 2; i < a_insn.len(); i++) {
1366                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), b_arrayed)) {
1367                    return false;
1368                }
1369            }
1370
1371            return true;
1372        }
1373    case spv::OpTypePointer:
1374        /* match on pointee type. storage class is expected to differ */
1375        return types_match(a, b, a_insn.word(3), b_insn.word(3), b_arrayed);
1376
1377    default:
1378        /* remaining types are CLisms, or may not appear in the interfaces we
1379         * are interested in. Just claim no match.
1380         */
1381        return false;
1382    }
1383}
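/* Example of the b_arrayed handling above: a vertex shader output `vec4 v` should
 * match a geometry shader input `vec4 v[]`, since geometry and tess control inputs
 * carry one extra array level for per-vertex data. A call such as
 * types_match(vs, gs, <vec4 id>, <array-of-vec4 id>, true) strips the consumer's
 * OpTypeArray and then compares vec4 against vec4. */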
1384
1385static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1386    auto it = map.find(id);
1387    if (it == map.end())
1388        return def;
1389    else
1390        return it->second;
1391}
1392
1393static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1394    auto insn = src->get_def(type);
1395    assert(insn != src->end());
1396
1397    switch (insn.opcode()) {
1398    case spv::OpTypePointer:
1399        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1400         * we're never actually passing pointers around. */
1401        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1402    case spv::OpTypeArray:
1403        if (strip_array_level) {
1404            return get_locations_consumed_by_type(src, insn.word(2), false);
1405        } else {
1406            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1407        }
1408    case spv::OpTypeMatrix:
1409        /* num locations is the dimension * element size */
1410        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1411    default:
1412        /* everything else is just 1. */
1413        return 1;
1414
1415        /* TODO: extend to handle 64bit scalar types, whose vectors may need
1416         * multiple locations. */
1417    }
1418}
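/* Examples: a float or vec4 consumes one location; a mat4 consumes four (one per
 * column vector); float[3] consumes three, unless strip_array_level is set because
 * the outer array level represents per-vertex data rather than extra locations. */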
1419
1420typedef std::pair<unsigned, unsigned> location_t;
1421typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1422
1423struct interface_var {
1424    uint32_t id;
1425    uint32_t type_id;
1426    uint32_t offset;
1427    /* TODO: collect the name, too? Isn't required to be present. */
1428};
1429
1430static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1431    while (true) {
1432
1433        if (def.opcode() == spv::OpTypePointer) {
1434            def = src->get_def(def.word(3));
1435        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1436            def = src->get_def(def.word(2));
1437            is_array_of_verts = false;
1438        } else if (def.opcode() == spv::OpTypeStruct) {
1439            return def;
1440        } else {
1441            return src->end();
1442        }
1443    }
1444}
1445
1446static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
1447                                            std::map<location_t, interface_var> &out,
1448                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1449                                            uint32_t id, uint32_t type_id) {
1450    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1451    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts);
1452    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1453        /* this isn't an interface block. */
1454        return;
1455    }
1456
1457    std::unordered_map<unsigned, unsigned> member_components;
1458
1459    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1460    for (auto insn : *src) {
1461        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1462            unsigned member_index = insn.word(2);
1463
1464            if (insn.word(3) == spv::DecorationComponent) {
1465                unsigned component = insn.word(4);
1466                member_components[member_index] = component;
1467            }
1468        }
1469    }
1470
1471    /* Second pass -- produce the output, from Location decorations */
1472    for (auto insn : *src) {
1473        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1474            unsigned member_index = insn.word(2);
1475            unsigned member_type_id = type.word(2 + member_index);
1476
1477            if (insn.word(3) == spv::DecorationLocation) {
1478                unsigned location = insn.word(4);
1479                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1480                auto component_it = member_components.find(member_index);
1481                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1482
1483                for (unsigned int offset = 0; offset < num_locations; offset++) {
1484                    interface_var v;
1485                    v.id = id;
1486                    /* TODO: member index in interface_var too? */
1487                    v.type_id = member_type_id;
1488                    v.offset = offset;
1489                    out[std::make_pair(location + offset, component)] = v;
1490                }
1491            }
1492        }
1493    }
1494}
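/* Example of the decorations consumed above, shown as SPIR-V disassembly for a
 * two-member block (member types assumed to consume one location each):
 *     OpMemberDecorate %block 0 Location 0
 *     OpMemberDecorate %block 1 Location 1
 *     OpMemberDecorate %block 1 Component 2
 * Member 0 is emitted at (location 0, component 0) and member 1 at (1, 2). */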
1495
1496static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
1497                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1498                                          bool is_array_of_verts) {
1499    std::unordered_map<unsigned, unsigned> var_locations;
1500    std::unordered_map<unsigned, unsigned> var_builtins;
1501    std::unordered_map<unsigned, unsigned> var_components;
1502    std::unordered_map<unsigned, unsigned> blocks;
1503
1504    for (auto insn : *src) {
1505
1506        /* We consider two interface models: SSO rendezvous-by-location, and
1507         * builtins. Complain about anything that fits neither model.
1508         */
1509        if (insn.opcode() == spv::OpDecorate) {
1510            if (insn.word(2) == spv::DecorationLocation) {
1511                var_locations[insn.word(1)] = insn.word(3);
1512            }
1513
1514            if (insn.word(2) == spv::DecorationBuiltIn) {
1515                var_builtins[insn.word(1)] = insn.word(3);
1516            }
1517
1518            if (insn.word(2) == spv::DecorationComponent) {
1519                var_components[insn.word(1)] = insn.word(3);
1520            }
1521
1522            if (insn.word(2) == spv::DecorationBlock) {
1523                blocks[insn.word(1)] = 1;
1524            }
1525        }
1526    }
1527
1528    /* TODO: handle grouped decorations */
1529    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1530     * have the same location, and we don't want one to clobber the other. */
1531
1532    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1533       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1534       the word to determine which word contains the terminator. */
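    /* Example: for the common entrypoint name "main", word 3 holds the bytes
       'm','a','i','n' (0x6e69616d little-endian; top byte 0x6e is nonzero) and
       word 4 holds the NUL terminator plus zero padding (top byte 0x00), so the
       loop below stops with word == 4 and the interface ids begin at word 5. */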
1535    auto word = 3;
1536    while (entrypoint.word(word) & 0xff000000u) {
1537        ++word;
1538    }
1539    ++word;
1540
1541    for (; word < entrypoint.len(); word++) {
1542        auto insn = src->get_def(entrypoint.word(word));
1543        assert(insn != src->end());
1544        assert(insn.opcode() == spv::OpVariable);
1545
1546        if (insn.word(3) == sinterface) {
1547            unsigned id = insn.word(2);
1548            unsigned type = insn.word(1);
1549
1550            int location = value_or_default(var_locations, id, -1);
1551            int builtin = value_or_default(var_builtins, id, -1);
1552            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1553
1554            /* All variables and interface block members in the Input or Output storage classes
1555             * must be decorated with either a builtin or an explicit location.
1556             *
1557             * TODO: integrate the interface block support here. For now, don't complain --
1558             * a valid SPIRV module will only hit this path for the interface block case, as the
1559             * individual members of the type are decorated, rather than variable declarations.
1560             */
1561
1562            if (location != -1) {
1563                /* A user-defined interface variable, with a location. Where a variable
1564                 * occupied multiple locations, emit one result for each. */
1565                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts);
1566                for (unsigned int offset = 0; offset < num_locations; offset++) {
1567                    interface_var v;
1568                    v.id = id;
1569                    v.type_id = type;
1570                    v.offset = offset;
1571                    out[std::make_pair(location + offset, component)] = v;
1572                }
1573            } else if (builtin == -1) {
1574                /* An interface block instance */
1575                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type);
1576            }
1577        }
1578    }
1579}
1580
1581static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
1582                                                 std::unordered_set<uint32_t> const &accessible_ids,
1583                                                 std::map<descriptor_slot_t, interface_var> &out) {
1584
1585    std::unordered_map<unsigned, unsigned> var_sets;
1586    std::unordered_map<unsigned, unsigned> var_bindings;
1587
1588    for (auto insn : *src) {
1589        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1590         * DecorationDescriptorSet and DecorationBinding.
1591         */
1592        if (insn.opcode() == spv::OpDecorate) {
1593            if (insn.word(2) == spv::DecorationDescriptorSet) {
1594                var_sets[insn.word(1)] = insn.word(3);
1595            }
1596
1597            if (insn.word(2) == spv::DecorationBinding) {
1598                var_bindings[insn.word(1)] = insn.word(3);
1599            }
1600        }
1601    }
1602
1603    for (auto id : accessible_ids) {
1604        auto insn = src->get_def(id);
1605        assert(insn != src->end());
1606
1607        if (insn.opcode() == spv::OpVariable &&
1608            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1609            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1610            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1611
1612            auto existing_it = out.find(std::make_pair(set, binding));
1613            if (existing_it != out.end()) {
1614                /* conflict within spv image */
1615                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1616                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1617                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1618                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1619                        existing_it->first.second);
1620            }
1621
1622            interface_var v;
1623            v.id = insn.word(2);
1624            v.type_id = insn.word(1);
1625            out[std::make_pair(set, binding)] = v;
1626        }
1627    }
1628}
1629
1630static bool validate_interface_between_stages(layer_data *my_data, shader_module const *producer,
1631                                              spirv_inst_iter producer_entrypoint, char const *producer_name,
1632                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1633                                              char const *consumer_name, bool consumer_arrayed_input) {
1634    std::map<location_t, interface_var> outputs;
1635    std::map<location_t, interface_var> inputs;
1636
1637    bool pass = true;
1638
1639    collect_interface_by_location(my_data, producer, producer_entrypoint, spv::StorageClassOutput, outputs, false);
1640    collect_interface_by_location(my_data, consumer, consumer_entrypoint, spv::StorageClassInput, inputs,
1641                                  consumer_arrayed_input);
1642
1643    auto a_it = outputs.begin();
1644    auto b_it = inputs.begin();
1645
1646    /* maps sorted by key (location); walk them together to find mismatches */
1647    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1648        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1649        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1650        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1651        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1652
1653        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1654            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1655                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1656                        "%s writes to output location %u.%u which is not consumed by %s", producer_name, a_first.first,
1657                        a_first.second, consumer_name)) {
1658                pass = false;
1659            }
1660            a_it++;
1661        } else if (a_at_end || a_first > b_first) {
1662            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1663                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1664                        "%s consumes input location %u.%u which is not written by %s", consumer_name, b_first.first, b_first.second,
1665                        producer_name)) {
1666                pass = false;
1667            }
1668            b_it++;
1669        } else {
1670            if (types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id, consumer_arrayed_input)) {
1671                /* OK! */
1672            } else {
1673                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1674                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1675                            a_first.first, a_first.second,
1676                            describe_type(producer, a_it->second.type_id).c_str(),
1677                            describe_type(consumer, b_it->second.type_id).c_str())) {
1678                    pass = false;
1679                }
1680            }
1681            a_it++;
1682            b_it++;
1683        }
1684    }
1685
1686    return pass;
1687}
1688
1689enum FORMAT_TYPE {
1690    FORMAT_TYPE_UNDEFINED,
1691    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1692    FORMAT_TYPE_SINT,
1693    FORMAT_TYPE_UINT,
1694};
1695
1696static unsigned get_format_type(VkFormat fmt) {
1697    switch (fmt) {
1698    case VK_FORMAT_UNDEFINED:
1699        return FORMAT_TYPE_UNDEFINED;
1700    case VK_FORMAT_R8_SINT:
1701    case VK_FORMAT_R8G8_SINT:
1702    case VK_FORMAT_R8G8B8_SINT:
1703    case VK_FORMAT_R8G8B8A8_SINT:
1704    case VK_FORMAT_R16_SINT:
1705    case VK_FORMAT_R16G16_SINT:
1706    case VK_FORMAT_R16G16B16_SINT:
1707    case VK_FORMAT_R16G16B16A16_SINT:
1708    case VK_FORMAT_R32_SINT:
1709    case VK_FORMAT_R32G32_SINT:
1710    case VK_FORMAT_R32G32B32_SINT:
1711    case VK_FORMAT_R32G32B32A32_SINT:
1712    case VK_FORMAT_B8G8R8_SINT:
1713    case VK_FORMAT_B8G8R8A8_SINT:
1714    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1715    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1716        return FORMAT_TYPE_SINT;
1717    case VK_FORMAT_R8_UINT:
1718    case VK_FORMAT_R8G8_UINT:
1719    case VK_FORMAT_R8G8B8_UINT:
1720    case VK_FORMAT_R8G8B8A8_UINT:
1721    case VK_FORMAT_R16_UINT:
1722    case VK_FORMAT_R16G16_UINT:
1723    case VK_FORMAT_R16G16B16_UINT:
1724    case VK_FORMAT_R16G16B16A16_UINT:
1725    case VK_FORMAT_R32_UINT:
1726    case VK_FORMAT_R32G32_UINT:
1727    case VK_FORMAT_R32G32B32_UINT:
1728    case VK_FORMAT_R32G32B32A32_UINT:
1729    case VK_FORMAT_B8G8R8_UINT:
1730    case VK_FORMAT_B8G8R8A8_UINT:
1731    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1732    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1733        return FORMAT_TYPE_UINT;
1734    default:
1735        return FORMAT_TYPE_FLOAT;
1736    }
1737}
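/* Note that everything not explicitly SINT or UINT above falls through to
 * FORMAT_TYPE_FLOAT: e.g. VK_FORMAT_R8G8B8A8_UNORM is fixed-point in memory but is
 * read as floating point by the shader, so it must feed an OpTypeFloat-based input. */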
1738
1739/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1740 * for comparison to a VkFormat's characterization above. */
1741static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1742    auto insn = src->get_def(type);
1743    assert(insn != src->end());
1744
1745    switch (insn.opcode()) {
1746    case spv::OpTypeInt:
1747        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1748    case spv::OpTypeFloat:
1749        return FORMAT_TYPE_FLOAT;
1750    case spv::OpTypeVector:
1751    case spv::OpTypeMatrix:
1752    case spv::OpTypeArray:
1753        /* vectors, matrices and arrays all classify by their element type */
1754        return get_fundamental_type(src, insn.word(2));
1755    /* pointers classify by their pointee type */
1756    case spv::OpTypePointer:
1757        return get_fundamental_type(src, insn.word(3));
1758    default:
1759        return FORMAT_TYPE_UNDEFINED;
1760    }
1761}
1762
1763static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1764    uint32_t bit_pos = u_ffs(stage);
1765    return bit_pos - 1;
1766}
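/* Example: VK_SHADER_STAGE_FRAGMENT_BIT is 0x10, so u_ffs returns 5 (the 1-based
 * index of the lowest set bit) and the stage id is 4 -- a dense index suitable for
 * per-stage tables such as shader_stage_attribs defined below. */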
1767
1768static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1769    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1770     * each binding should be specified only once.
1771     */
1772    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1773    bool pass = true;
1774
1775    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1776        auto desc = &vi->pVertexBindingDescriptions[i];
1777        auto &binding = bindings[desc->binding];
1778        if (binding) {
1779            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1780                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1781                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1782                pass = false;
1783            }
1784        } else {
1785            binding = desc;
1786        }
1787    }
1788
1789    return pass;
1790}
1791
1792static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi,
1793                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1794    std::map<location_t, interface_var> inputs;
1795    bool pass = true;
1796
1797    collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false);
1798
1799    /* Build index by location */
1800    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1801    if (vi) {
1802        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++)
1803            attribs[vi->pVertexAttributeDescriptions[i].location] = &vi->pVertexAttributeDescriptions[i];
1804    }
1805
1806    auto it_a = attribs.begin();
1807    auto it_b = inputs.begin();
1808
1809    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1810        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1811        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1812        auto a_first = a_at_end ? 0 : it_a->first;
1813        auto b_first = b_at_end ? 0 : it_b->first.first;
1814        if (!a_at_end && (b_at_end || a_first < b_first)) {
1815            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1816                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1817                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1818                pass = false;
1819            }
1820            it_a++;
1821        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1822            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1823                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1824                        b_first)) {
1825                pass = false;
1826            }
1827            it_b++;
1828        } else {
1829            unsigned attrib_type = get_format_type(it_a->second->format);
1830            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1831
1832            /* type checking */
1833            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1834                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1835                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1836                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1837                            string_VkFormat(it_a->second->format), a_first,
1838                            describe_type(vs, it_b->second.type_id).c_str())) {
1839                    pass = false;
1840                }
1841            }
1842
1843            /* OK! */
1844            it_a++;
1845            it_b++;
1846        }
1847    }
1848
1849    return pass;
1850}
1851
1852static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs,
1853                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1854    const std::vector<VkFormat> &color_formats = rp->subpassColorFormats[subpass];
1855    std::map<location_t, interface_var> outputs;
1856    bool pass = true;
1857
1858    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1859
1860    collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false);
1861
1862    auto it = outputs.begin();
1863    uint32_t attachment = 0;
1864
1865    /* Walk attachment list and outputs together -- this is a little overpowered since attachments
1866     * are currently dense, but the parallel with matching between shader stages is nice.
1867     */
1868
1869    while ((outputs.size() > 0 && it != outputs.end()) || attachment < color_formats.size()) {
1870        if (attachment == color_formats.size() || (it != outputs.end() && it->first.first < attachment)) {
1871            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1872                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1873                        "FS writes to output location %d with no matching attachment", it->first.first)) {
1874                pass = false;
1875            }
1876            it++;
1877        } else if (it == outputs.end() || it->first.first > attachment) {
1878            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1879                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", attachment)) {
1880                pass = false;
1881            }
1882            attachment++;
1883        } else {
1884            unsigned output_type = get_fundamental_type(fs, it->second.type_id);
1885            unsigned att_type = get_format_type(color_formats[attachment]);
1886
1887            /* type checking */
1888            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1889                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1890                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1891                            "Attachment %d of type `%s` does not match FS output type of `%s`", attachment,
1892                            string_VkFormat(color_formats[attachment]),
1893                            describe_type(fs, it->second.type_id).c_str())) {
1894                    pass = false;
1895                }
1896            }
1897
1898            /* OK! */
1899            it++;
1900            attachment++;
1901        }
1902    }
1903
1904    return pass;
1905}
1906
1907/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1908 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1909 * for example.
1910 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1911 *  - NOT the shader input/output interfaces.
1912 *
1913 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1914 * converting parts of this to be generated from the machine-readable spec instead.
1915 */
1916static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1917    std::unordered_set<uint32_t> worklist;
1918    worklist.insert(entrypoint.word(2));
1919
1920    while (!worklist.empty()) {
1921        auto id_iter = worklist.begin();
1922        auto id = *id_iter;
1923        worklist.erase(id_iter);
1924
1925        auto insn = src->get_def(id);
1926        if (insn == src->end()) {
1927            /* id is something we didn't collect in build_def_index. That's OK -- we'll stumble
1928             * across all kinds of things here that we may not care about. */
1929            continue;
1930        }
1931
1932        /* try to add to the output set */
1933        if (!ids.insert(id).second) {
1934            continue; /* if we already saw this id, we don't want to walk it again. */
1935        }
1936
1937        switch (insn.opcode()) {
1938        case spv::OpFunction:
1939            /* scan whole body of the function, enlisting anything interesting */
1940            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1941                switch (insn.opcode()) {
1942                case spv::OpLoad:
1943                case spv::OpAtomicLoad:
1944                case spv::OpAtomicExchange:
1945                case spv::OpAtomicCompareExchange:
1946                case spv::OpAtomicCompareExchangeWeak:
1947                case spv::OpAtomicIIncrement:
1948                case spv::OpAtomicIDecrement:
1949                case spv::OpAtomicIAdd:
1950                case spv::OpAtomicISub:
1951                case spv::OpAtomicSMin:
1952                case spv::OpAtomicUMin:
1953                case spv::OpAtomicSMax:
1954                case spv::OpAtomicUMax:
1955                case spv::OpAtomicAnd:
1956                case spv::OpAtomicOr:
1957                case spv::OpAtomicXor:
1958                    worklist.insert(insn.word(3)); /* ptr */
1959                    break;
1960                case spv::OpStore:
1961                case spv::OpAtomicStore:
1962                    worklist.insert(insn.word(1)); /* ptr */
1963                    break;
1964                case spv::OpAccessChain:
1965                case spv::OpInBoundsAccessChain:
1966                    worklist.insert(insn.word(3)); /* base ptr */
1967                    break;
1968                case spv::OpSampledImage:
1969                case spv::OpImageSampleImplicitLod:
1970                case spv::OpImageSampleExplicitLod:
1971                case spv::OpImageSampleDrefImplicitLod:
1972                case spv::OpImageSampleDrefExplicitLod:
1973                case spv::OpImageSampleProjImplicitLod:
1974                case spv::OpImageSampleProjExplicitLod:
1975                case spv::OpImageSampleProjDrefImplicitLod:
1976                case spv::OpImageSampleProjDrefExplicitLod:
1977                case spv::OpImageFetch:
1978                case spv::OpImageGather:
1979                case spv::OpImageDrefGather:
1980                case spv::OpImageRead:
1981                case spv::OpImage:
1982                case spv::OpImageQueryFormat:
1983                case spv::OpImageQueryOrder:
1984                case spv::OpImageQuerySizeLod:
1985                case spv::OpImageQuerySize:
1986                case spv::OpImageQueryLod:
1987                case spv::OpImageQueryLevels:
1988                case spv::OpImageQuerySamples:
1989                case spv::OpImageSparseSampleImplicitLod:
1990                case spv::OpImageSparseSampleExplicitLod:
1991                case spv::OpImageSparseSampleDrefImplicitLod:
1992                case spv::OpImageSparseSampleDrefExplicitLod:
1993                case spv::OpImageSparseSampleProjImplicitLod:
1994                case spv::OpImageSparseSampleProjExplicitLod:
1995                case spv::OpImageSparseSampleProjDrefImplicitLod:
1996                case spv::OpImageSparseSampleProjDrefExplicitLod:
1997                case spv::OpImageSparseFetch:
1998                case spv::OpImageSparseGather:
1999                case spv::OpImageSparseDrefGather:
2000                case spv::OpImageTexelPointer:
2001                    worklist.insert(insn.word(3)); /* image or sampled image */
2002                    break;
2003                case spv::OpImageWrite:
2004                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
2005                    break;
2006                case spv::OpFunctionCall:
2007                    for (auto i = 3; i < insn.len(); i++) {
2008                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2009                    }
2010                    break;
2011
2012                case spv::OpExtInst:
2013                    for (auto i = 5; i < insn.len(); i++) {
2014                        worklist.insert(insn.word(i)); /* operands to ext inst */
2015                    }
2016                    break;
2017                }
2018            }
2019            break;
2020        }
2021    }
2022}
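/* Sketch of the intended pairing (local names here are illustrative): seed the walk
 * from a pipeline stage's entrypoint, then restrict resource matching to ids that
 * are actually reachable, e.g.:
 *
 *     std::unordered_set<uint32_t> accessible_ids;
 *     mark_accessible_ids(module, entrypoint, accessible_ids);
 *     collect_interface_by_descriptor_slot(my_data, module, accessible_ids, slots);
 *
 * so declared-but-unreferenced resources do not produce false mismatches. */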
2023
2024struct shader_stage_attributes {
2025    char const *const name;
2026    bool arrayed_input;
2027};
2028
2029static shader_stage_attributes shader_stage_attribs[] = {
2030    {"vertex shader", false},
2031    {"tessellation control shader", true},
2032    {"tessellation evaluation shader", false},
2033    {"geometry shader", true},
2034    {"fragment shader", false},
2035};
2036
2037static bool validate_push_constant_block_against_pipeline(layer_data *my_data,
2038                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
2039                                                          shader_module const *src, spirv_inst_iter type,
2040                                                          VkShaderStageFlagBits stage) {
2041    bool pass = true;
2042
2043    /* strip off ptrs etc */
2044    type = get_struct_type(src, type, false);
2045    assert(type != src->end());
2046
2047    /* validate directly off the offsets. this isn't quite correct for arrays
2048     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2049     * sizes */
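    /* Worked example: given a layout range { stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
     * offset = 0, size = 16 }, a member decorated Offset 8 passes for the vertex
     * stage (0 <= 8 and 8 + 4 <= 16); the same member in a fragment-stage module
     * trips SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, and a member
     * decorated Offset 16 falls outside the range entirely and trips
     * SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE. */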
2050    for (auto insn : *src) {
2051        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2052
2053            if (insn.word(3) == spv::DecorationOffset) {
2054                unsigned offset = insn.word(4);
2055                auto size = 4; /* bytes; TODO: calculate this based on the type */
2056
2057                bool found_range = false;
2058                for (auto const &range : *pushConstantRanges) {
2059                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2060                        found_range = true;
2061
2062                        if ((range.stageFlags & stage) == 0) {
2063                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2064                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2065                                        "Push constant range covering variable starting at "
2066                                        "offset %u not accessible from stage %s",
2067                                        offset, string_VkShaderStageFlagBits(stage))) {
2068                                pass = false;
2069                            }
2070                        }
2071
2072                        break;
2073                    }
2074                }
2075
2076                if (!found_range) {
2077                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2078                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2079                                "Push constant range covering variable starting at "
2080                                "offset %u not declared in layout",
2081                                offset)) {
2082                        pass = false;
2083                    }
2084                }
2085            }
2086        }
2087    }
2088
2089    return pass;
2090}
2091
2092static bool validate_push_constant_usage(layer_data *my_data,
2093                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
2094                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2095    bool pass = true;
2096
2097    for (auto id : accessible_ids) {
2098        auto def_insn = src->get_def(id);
2099        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2100            pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src,
2101                                                                 src->get_def(def_insn.word(1)), stage);
2102        }
2103    }
2104
2105    return pass;
2106}
2107
2108// For given pipelineLayout verify that the setLayout at slot.first
2109//  has the requested binding at slot.second
2110static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) {
2111
2112    if (!pipelineLayout)
2113        return nullptr;
2114
2115    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
2116        return nullptr;
2117
2118    auto const layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]];
2119    if (!layout_node) return nullptr; // guard: layout handle that was never tracked
2120    auto bindingIt = layout_node->bindingToIndexMap.find(slot.second);
2121    if ((bindingIt == layout_node->bindingToIndexMap.end()) || (layout_node->createInfo.pBindings == NULL))
2122        return nullptr;
2123
2124    assert(bindingIt->second < layout_node->createInfo.bindingCount);
2125    return &layout_node->createInfo.pBindings[bindingIt->second];
2126}
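// Hypothetical usage sketch: descriptor_slot_t slot(0, 2) asks whether set 0 of the
// given pipeline layout declares binding 2. A null return means the set index is out
// of range, the set layout was never tracked, or the layout has no such binding.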
2127
2128// Block of code below for managing/tracking pipeline state that this layer cares about
2129
2130static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2131
2132// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2133//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2134//   to that same cmd buffer by a separate thread are not changing state from underneath us
2135// Track the last cmd buffer touched by this thread
2136
2137static VkBool32 hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2138    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2139        if (pCB->drawCount[i])
2140            return VK_TRUE;
2141    }
2142    return VK_FALSE;
2143}
2144
2145// Check object status for selected flag state
2146static VkBool32 validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2147                                DRAW_STATE_ERROR error_code, const char *fail_msg) {
2148    if (!(pNode->status & status_mask)) {
2149        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2150                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2151                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2152    }
2153    return VK_FALSE;
2154}
2155
2156// Retrieve pipeline node ptr for given pipeline object
2157static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
2158    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
2159        return NULL;
2160    }
2161    return my_data->pipelineMap[pipeline];
2162}
2163
2164// Return VK_TRUE if for a given PSO, the given state enum is dynamic, else return VK_FALSE
2165static VkBool32 isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2166    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2167        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2168            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2169                return VK_TRUE;
2170        }
2171    }
2172    return VK_FALSE;
2173}
2174
2175// Validate state stored as flags at time of draw call
2176static VkBool32 validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe,
2177                                          VkBool32 indexedDraw) {
2178    VkBool32 result;
2179    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2180                             "Dynamic viewport state not set for this command buffer");
2181    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2182                              "Dynamic scissor state not set for this command buffer");
2183    if ((pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2184        (pPipe->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP)) {
2185        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2186                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2187    }
2188    if (pPipe->rsStateCI.depthBiasEnable) {
2189        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2190                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2191    }
2192    if (pPipe->blendConstantsEnabled) {
2193        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2194                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2195    }
2196    if (pPipe->dsStateCI.depthBoundsTestEnable) {
2197        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2198                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2199    }
2200    if (pPipe->dsStateCI.stencilTestEnable) {
2201        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2202                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2203        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2204                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2205        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2206                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2207    }
2208    if (indexedDraw) {
2209        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2210                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2211                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2212    }
2213    return result;
2214}
2215
2216// Verify attachment reference compatibility according to spec
2217//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
2218//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2219//   to make sure that format and sample counts match.
2220//  If not, they are not compatible.
2221static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2222                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2223                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2224                                             const VkAttachmentDescription *pSecondaryAttachments) {
2225    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2226        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2227            return true;
2228    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2229        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2230            return true;
2231    } else { // format and sample count must match
2232        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2233             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2234            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2235             pSecondaryAttachments[pSecondary[index].attachment].samples))
2236            return true;
2237    }
2238    // Format and sample counts didn't match
2239    return false;
2240}
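// Worked example: with primaryCount = 2 and secondaryCount = 1, index = 1 treats the
// secondary subpass as if its attachment 1 were VK_ATTACHMENT_UNUSED, so the references
// are compatible only if the primary's attachment 1 is also VK_ATTACHMENT_UNUSED. When
// both arrays cover the index, compatibility reduces to the referenced descriptions
// having the same format and sample count.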
2241
2242// For given primary and secondary RenderPass objects, verify that they're compatible
2243static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2244                                            string &errorMsg) {
2245    stringstream errorStr;
2246    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2247        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2248        errorMsg = errorStr.str();
2249        return false;
2250    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2251        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2252        errorMsg = errorStr.str();
2253        return false;
2254    }
2255    // Trivial pass case is exact same RP
2256    if (primaryRP == secondaryRP) {
2257        return true;
2258    }
2259    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2260    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2261    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2262        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2263                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2264        errorMsg = errorStr.str();
2265        return false;
2266    }
2267    uint32_t spIndex = 0;
2268    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2269        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2270        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2271        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2272        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2273        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2274            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2275                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2276                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2277                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2278                errorMsg = errorStr.str();
2279                return false;
2280            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2281                                                         primaryColorCount, primaryRPCI->pAttachments,
2282                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2283                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2284                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2285                errorMsg = errorStr.str();
2286                return false;
2287            } else if (cIdx == 0 && primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment &&
2288                       secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment && /* a single reference, not an array -- compare once */
2289                       !attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1,
2290                                                         primaryRPCI->pAttachments,
2291                                                         secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment, 1, secondaryRPCI->pAttachments)) {
2292                errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2293                errorMsg = errorStr.str();
2294                return false;
2295            }
2296        }
2297        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2298        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2299        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2300        for (uint32_t i = 0; i < inputMax; ++i) {
2301            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2302                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2303                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2304                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2305                errorMsg = errorStr.str();
2306                return false;
2307            }
2308        }
2309    }
2310    return true;
2311}
2312
2313// For a given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
2314static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2315                                            const uint32_t layoutIndex, string &errorMsg) {
2316    stringstream errorStr;
2317    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2318    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2319        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2320        errorMsg = errorStr.str();
2321        return false;
2322    }
2323    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2324        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2325                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2326                 << ", but you're attempting to bind set to index " << layoutIndex;
2327        errorMsg = errorStr.str();
2328        return false;
2329    }
2330    // Get the specific setLayout from PipelineLayout that overlaps this set
2331    LAYOUT_NODE *pLayoutNode = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2332    if (pLayoutNode->layout == pSet->pLayout->layout) { // trivial pass case
2333        return true;
2334    }
2335    size_t descriptorCount = pLayoutNode->descriptorTypes.size();
2336    if (descriptorCount != pSet->pLayout->descriptorTypes.size()) {
2337        errorStr << "setLayout " << layoutIndex << " from pipelineLayout " << layout << " has " << descriptorCount
2338                 << " descriptors, but corresponding set being bound has " << pSet->pLayout->descriptorTypes.size()
2339                 << " descriptors.";
2340        errorMsg = errorStr.str();
2341        return false; // trivial fail case
2342    }
2343    // Now need to check set against corresponding pipelineLayout to verify compatibility
2344    for (size_t i = 0; i < descriptorCount; ++i) {
2345        // Need to verify that layouts are identically defined
2346        //  TODO : Is the check below sufficient? It verifies that types & stageFlags match per descriptor,
2347        //    but do we also need to check immutable samplers?
2348        if (pLayoutNode->descriptorTypes[i] != pSet->pLayout->descriptorTypes[i]) {
2349            errorStr << "descriptor " << i << " for descriptorSet being bound is type '"
2350                     << string_VkDescriptorType(pSet->pLayout->descriptorTypes[i])
2351                     << "' but corresponding descriptor from pipelineLayout is type '"
2352                     << string_VkDescriptorType(pLayoutNode->descriptorTypes[i]) << "'";
2353            errorMsg = errorStr.str();
2354            return false;
2355        }
2356        if (pLayoutNode->stageFlags[i] != pSet->pLayout->stageFlags[i]) {
2357            errorStr << "stageFlags " << i << " for descriptorSet being bound is " << pSet->pLayout->stageFlags[i]
2358                     << " but corresponding descriptor from pipelineLayout has stageFlags " << pLayoutNode->stageFlags[i];
2359            errorMsg = errorStr.str();
2360            return false;
2361        }
2362    }
2363    return true;
2364}
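
// Illustrative note (editor's sketch, not layer logic): compatibility here is a flat
// element-wise comparison of the two layouts. For a hypothetical pair such as
//     layoutA binding 0: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stageFlags = VERTEX
//     layoutB binding 0: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, stageFlags = FRAGMENT
// the types match but the stageFlags comparison above fails, so the set is reported
// as incompatible with the pipeline layout.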
2365
2366// Validate that data for each specialization entry is fully contained within the buffer.
2367static VkBool32 validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2368    VkBool32 pass = VK_TRUE;
2369
2370    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2371
2372    if (spec) {
2373        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2374            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2375                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2376                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2377                            "Specialization entry %u (for constant id %u) references memory outside provided "
2378                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2379                            " bytes provided)",
2380                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2381                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2382
2383                    pass = VK_FALSE;
2384                }
2385            }
2386        }
2387    }
2388
2389    return pass;
2390}
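
// Illustrative only -- a hypothetical specialization block that would trip the check
// above, since the entry ends at byte 8 but only 4 bytes of data are supplied:
//     VkSpecializationMapEntry entry = {/*constantID*/ 0, /*offset*/ 4, /*size*/ 4};
//     VkSpecializationInfo spec = {1, &entry, /*dataSize*/ 4, pData};
// offset (4) + size (4) = 8 > dataSize (4), so SHADER_CHECKER_BAD_SPECIALIZATION fires.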
2391
2392static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
2393                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2394    auto type = module->get_def(type_id);
2395
2396    descriptor_count = 1;
2397
2398    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2399     * descriptor count for each dimension. */
2400    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2401        if (type.opcode() == spv::OpTypeArray) {
2402            descriptor_count *= get_constant_value(module, type.word(3));
2403            type = module->get_def(type.word(2));
2404        } else {
2406            type = module->get_def(type.word(3));
2407        }
2408    }
2409
2410    switch (type.opcode()) {
2411    case spv::OpTypeStruct: {
2412        for (auto insn : *module) {
2413            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2414                if (insn.word(2) == spv::DecorationBlock) {
2415                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2416                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2417                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2418                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2419                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2420                }
2421            }
2422        }
2423
2424        /* Invalid */
2425        return false;
2426    }
2427
2428    case spv::OpTypeSampler:
2429        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2430
2431    case spv::OpTypeSampledImage:
2432        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2433            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2434             * doesn't really have a sampler, and a texel buffer descriptor
2435             * doesn't really provide one. Allow this slight mismatch.
2436             */
2437            auto image_type = module->get_def(type.word(2));
2438            auto dim = image_type.word(3);
2439            auto sampled = image_type.word(7);
2440            return dim == spv::DimBuffer && sampled == 1;
2441        }
2442        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2443
2444    case spv::OpTypeImage: {
2445        /* Many descriptor types can back an image type -- which one depends on
2446         * the dimension and on whether the image will be used with a sampler.
2447         * SPIR-V for Vulkan requires that sampled be 1 or 2 -- leaving the
2448         * decision to runtime is unacceptable.
2449         */
2450        auto dim = type.word(3);
2451        auto sampled = type.word(7);
2452
2453        if (dim == spv::DimSubpassData) {
2454            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2455        } else if (dim == spv::DimBuffer) {
2456            if (sampled == 1) {
2457                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2458            } else {
2459                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2460            }
2461        } else if (sampled == 1) {
2462            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2463        } else {
2464            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2465        }
2466    }
2467
2468    /* We shouldn't really see any other junk types -- but if we do, they're
2469     * a mismatch.
2470     */
2471    default:
2472        return false; /* Mismatch */
2473    }
2474}
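
// Illustrative summary of the switch above (GLSL source -> SPIR-V type -> required descriptor):
//     uniform Block {...}      -> OpTypeStruct + DecorationBlock       -> UNIFORM_BUFFER[_DYNAMIC]
//     buffer  Block {...}      -> OpTypeStruct + DecorationBufferBlock -> STORAGE_BUFFER[_DYNAMIC]
//     sampler2D                -> OpTypeSampledImage                   -> COMBINED_IMAGE_SAMPLER
//     texture2D (sampled == 1) -> OpTypeImage                          -> SAMPLED_IMAGE
//     image2D   (sampled == 2) -> OpTypeImage                          -> STORAGE_IMAGE
//     subpassInput             -> OpTypeImage (DimSubpassData)         -> INPUT_ATTACHMENT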
2475
2476static VkBool32 require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2477    if (!feature) {
2478        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2479                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2480                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
2481                    "enabled on the device",
2482                    feature_name)) {
2483            return VK_FALSE;
2484        }
2485    }
2486
2487    return VK_TRUE;
2488}
2489
2490static VkBool32 validate_shader_capabilities(layer_data *my_data, shader_module const *src)
2491{
2492    VkBool32 pass = VK_TRUE;
2493
2494    auto enabledFeatures = &my_data->physDevProperties.features;
2495
2496    for (auto insn : *src) {
2497        if (insn.opcode() == spv::OpCapability) {
2498            switch (insn.word(1)) {
2499            case spv::CapabilityMatrix:
2500            case spv::CapabilityShader:
2501            case spv::CapabilityInputAttachment:
2502            case spv::CapabilitySampled1D:
2503            case spv::CapabilityImage1D:
2504            case spv::CapabilitySampledBuffer:
2505            case spv::CapabilityImageBuffer:
2506            case spv::CapabilityImageQuery:
2507            case spv::CapabilityDerivativeControl:
2508                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2509                break;
2510
2511            case spv::CapabilityGeometry:
2512                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
2513                break;
2514
2515            case spv::CapabilityTessellation:
2516                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
2517                break;
2518
2519            case spv::CapabilityFloat64:
2520                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2521                break;
2522
2523            case spv::CapabilityInt64:
2524                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
2525                break;
2526
2527            case spv::CapabilityTessellationPointSize:
2528            case spv::CapabilityGeometryPointSize:
2529                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2530                                        "shaderTessellationAndGeometryPointSize");
2531                break;
2532
2533            case spv::CapabilityImageGatherExtended:
2534                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2535                break;
2536
2537            case spv::CapabilityStorageImageMultisample:
2538                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2539                break;
2540
2541            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2542                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2543                                        "shaderUniformBufferArrayDynamicIndexing");
2544                break;
2545
2546            case spv::CapabilitySampledImageArrayDynamicIndexing:
2547                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2548                                        "shaderSampledImageArrayDynamicIndexing");
2549                break;
2550
2551            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2552                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2553                                        "shaderStorageBufferArrayDynamicIndexing");
2554                break;
2555
2556            case spv::CapabilityStorageImageArrayDynamicIndexing:
2557                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2558                                        "shaderStorageImageArrayDynamicIndexing");
2559                break;
2560
2561            case spv::CapabilityClipDistance:
2562                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2563                break;
2564
2565            case spv::CapabilityCullDistance:
2566                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2567                break;
2568
2569            case spv::CapabilityImageCubeArray:
2570                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2571                break;
2572
2573            case spv::CapabilitySampleRateShading:
2574                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2575                break;
2576
2577            case spv::CapabilitySparseResidency:
2578                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2579                break;
2580
2581            case spv::CapabilityMinLod:
2582                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2583                break;
2584
2585            case spv::CapabilitySampledCubeArray:
2586                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2587                break;
2588
2589            case spv::CapabilityImageMSArray:
2590                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2591                break;
2592
2593            case spv::CapabilityStorageImageExtendedFormats:
2594                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
2595                                        "shaderStorageImageExtendedFormats");
2596                break;
2597
2598            case spv::CapabilityInterpolationFunction:
2599                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2600                break;
2601
2602            case spv::CapabilityStorageImageReadWithoutFormat:
2603                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2604                                        "shaderStorageImageReadWithoutFormat");
2605                break;
2606
2607            case spv::CapabilityStorageImageWriteWithoutFormat:
2608                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2609                                        "shaderStorageImageWriteWithoutFormat");
2610                break;
2611
2612            case spv::CapabilityMultiViewport:
2613                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
2614                break;
2615
2616            default:
2617                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2618                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2619                            "Shader declares capability %u, which is not supported in Vulkan.",
2620                            insn.word(1)))
2621                    pass = VK_FALSE;
2622                break;
2623            }
2624        }
2625    }
2626
2627    return pass;
2628}
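
// Example: a shader declaring "OpCapability Geometry" reaches the CapabilityGeometry
// case above; if the device was created without VkPhysicalDeviceFeatures::geometryShader,
// require_feature() reports SHADER_CHECKER_FEATURE_NOT_ENABLED and the shader fails
// validation.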
2629
2632static VkBool32 validate_pipeline_shader_stage(layer_data *dev_data,
2633                                               VkPipelineShaderStageCreateInfo const *pStage,
2634                                               PIPELINE_NODE *pipeline,
2635                                               PIPELINE_LAYOUT_NODE *pipelineLayout,
2636                                               shader_module **out_module,
2637                                               spirv_inst_iter *out_entrypoint)
2638{
2639    VkBool32 pass = VK_TRUE;
2640    auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get();
2641    pass &= validate_specialization_offsets(dev_data, pStage);
2642
2643    /* find the entrypoint */
2644    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2645    if (entrypoint == module->end()) {
2646        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2647                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2648                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2649                    string_VkShaderStageFlagBits(pStage->stage))) {
2650            pass = VK_FALSE;
2651        }
2652    }
2653
2654    /* validate shader capabilities against enabled device features */
2655    pass &= validate_shader_capabilities(dev_data, module);
2656
2657    /* mark accessible ids */
2658    std::unordered_set<uint32_t> accessible_ids;
2659    mark_accessible_ids(module, entrypoint, accessible_ids);
2660
2661    /* validate descriptor set layout against what the entrypoint actually uses */
2662    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2663    collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses);
2664
2665    /* validate push constant usage */
2666    pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges,
2667                                        module, accessible_ids, pStage->stage);
2668
2669    /* validate descriptor use */
2670    for (auto use : descriptor_uses) {
2671        // While validating shaders capture which slots are used by the pipeline
2672        pipeline->active_slots[use.first.first].insert(use.first.second);
2673
2674        /* find the matching binding */
2675        auto binding = get_descriptor_binding(dev_data, pipelineLayout, use.first);
2676        unsigned required_descriptor_count;
2677
2678        if (!binding) {
2679            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2680                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2681                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2682                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2683                pass = VK_FALSE;
2684            }
2685        } else if (~binding->stageFlags & pStage->stage) {
2686            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2687                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2688                        "Shader uses descriptor slot %u.%u (used "
2689                        "as type `%s`) but descriptor not "
2690                        "accessible from stage %s",
2691                        use.first.first, use.first.second,
2692                        describe_type(module, use.second.type_id).c_str(),
2693                        string_VkShaderStageFlagBits(pStage->stage))) {
2694                pass = VK_FALSE;
2695            }
2696        } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType, /*out*/ required_descriptor_count)) {
2697            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2698                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2699                        "Type mismatch on descriptor slot "
2700                        "%u.%u (used as type `%s`) but "
2701                        "bound descriptor is of type %s",
2702                        use.first.first, use.first.second,
2703                        describe_type(module, use.second.type_id).c_str(),
2704                        string_VkDescriptorType(binding->descriptorType))) {
2705                pass = VK_FALSE;
2706            }
2707        } else if (binding->descriptorCount < required_descriptor_count) {
2708            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2709                        __LINE__, SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2710                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2711                        required_descriptor_count, use.first.first, use.first.second,
2712                        describe_type(module, use.second.type_id).c_str(),
2713                        binding->descriptorCount)) {
2714                pass = VK_FALSE;
2715            }
2716        }
2717    }
2718
2719    return pass;
2720}
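
// Illustrative: for a binding declared with stageFlags == VK_SHADER_STAGE_VERTEX_BIT,
// a use from the fragment stage makes (~stageFlags & VK_SHADER_STAGE_FRAGMENT_BIT)
// non-zero, so the DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE path above is taken.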
2721
2722
2723// Validate the shaders used by the given pipeline and store the slots
2724//  that are actually used by the pipeline into pPipeline->active_slots
2725static VkBool32 validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2726    VkGraphicsPipelineCreateInfo const *pCreateInfo = &pPipeline->graphicsPipelineCI;
2727    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2728    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2729
2730    shader_module *shaders[5];
2731    memset(shaders, 0, sizeof(shaders));
2732    spirv_inst_iter entrypoints[5];
2733    memset(entrypoints, 0, sizeof(entrypoints));
2734    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2735    VkBool32 pass = VK_TRUE;
2736
2737    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2738
2739    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2740        VkPipelineShaderStageCreateInfo const *pStage = &pCreateInfo->pStages[i];
2741        auto stage_id = get_shader_stage_id(pStage->stage);
2742        pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout,
2743                                               &shaders[stage_id], &entrypoints[stage_id]);
2744    }
2745
2746    vi = pCreateInfo->pVertexInputState;
2747
2748    if (vi) {
2749        pass &= validate_vi_consistency(my_data, vi);
2750    }
2751
2752    if (shaders[vertex_stage]) {
2753        pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2754    }
2755
2756    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2757    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2758
2759    while (!shaders[producer] && producer != fragment_stage) {
2760        producer++;
2761        consumer++;
2762    }
2763
2764    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2765        assert(shaders[producer]);
2766        if (shaders[consumer]) {
2767            pass &= validate_interface_between_stages(my_data, shaders[producer], entrypoints[producer],
2768                                                     shader_stage_attribs[producer].name, shaders[consumer], entrypoints[consumer],
2769                                                     shader_stage_attribs[consumer].name,
2770                                                     shader_stage_attribs[consumer].arrayed_input);
2771
2772            producer = consumer;
2773        }
2774    }
2775
2776    auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr;
2777
2778    if (shaders[fragment_stage] && rp) {
2779        pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp,
2780                                                       pCreateInfo->subpass);
2781    }
2782
2783    return pass;
2784}
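
// Illustrative: for a pipeline with only VS and FS, the producer/consumer walk above
// steps past the empty tessellation and geometry slots, so the only interface check
// performed is VS outputs against FS inputs.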
2785
2786static VkBool32 validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) {
2787    VkComputePipelineCreateInfo const *pCreateInfo = &pPipeline->computePipelineCI;
2788
2789    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;
2790
2791    shader_module *module;
2792    spirv_inst_iter entrypoint;
2793
2794    return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout,
2795                                          &module, &entrypoint);
2796}
2797
2798// Return Set node ptr for specified set or else NULL
2799static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
2800    if (my_data->setMap.find(set) == my_data->setMap.end()) {
2801        return NULL;
2802    }
2803    return my_data->setMap[set];
2804}
2805
2806// For given Layout Node and binding, return index where that binding begins
2807static uint32_t getBindingStartIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2808    uint32_t offsetIndex = 0;
2809    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2810        if (pLayout->createInfo.pBindings[i].binding == binding)
2811            break;
2812        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2813    }
2814    return offsetIndex;
2815}
2816
2817// For given layout node and binding, return last index that is updated
2818static uint32_t getBindingEndIndex(const LAYOUT_NODE *pLayout, const uint32_t binding) {
2819    uint32_t offsetIndex = 0;
2820    for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
2821        offsetIndex += pLayout->createInfo.pBindings[i].descriptorCount;
2822        if (pLayout->createInfo.pBindings[i].binding == binding)
2823            break;
2824    }
2825    return offsetIndex - 1;
2826}
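
// Worked example for the two helpers above, assuming a hypothetical layout with
// pBindings = {binding 0: descriptorCount 3, binding 1: descriptorCount 2}.
// The descriptors flatten to indices 0..4, so:
//     getBindingStartIndex(layout, 1) == 3
//     getBindingEndIndex(layout, 1)   == 4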
2827
2828// For the given command buffer, verify and update the state for activeSetBindingsPairs
2829//  This includes:
2830//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2831//     To be valid, the dynamic offset combined with the offset and range from its
2832//     descriptor update must not overflow the size of its buffer being updated
2833//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2834//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2835static VkBool32 validate_and_update_drawtime_descriptor_state(
2836    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2837    const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
2838    VkBool32 result = VK_FALSE;
2839
2840    VkWriteDescriptorSet *pWDS = NULL;
2841    uint32_t dynOffsetIndex = 0;
2842    VkDeviceSize bufferSize = 0;
2843    for (auto set_bindings_pair : activeSetBindingsPairs) {
2844        SET_NODE *set_node = set_bindings_pair.first;
2845        LAYOUT_NODE *layout_node = set_node->pLayout;
2846        for (auto binding : set_bindings_pair.second) {
2847            uint32_t startIdx = getBindingStartIndex(layout_node, binding);
2848            uint32_t endIdx = getBindingEndIndex(layout_node, binding);
2849            for (uint32_t i = startIdx; i <= endIdx; ++i) {
2850                // TODO : Flag error here if set_node->pDescriptorUpdates[i] is NULL
2851                switch (set_node->pDescriptorUpdates[i]->sType) {
2852                case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
2853                    pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];
2854                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
2855                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
2856                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2857                            bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].create_info->size;
2858                            uint32_t dynOffset = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
2859                            if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
2860                                if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
2861                                    result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2862                                                      VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2863                                                      reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
2864                                                      DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
2865                                                      "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of "
2866                                                      "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32 ", "
2867                                                      "combined with offset %#" PRIxLEAST64 ", oversteps its buffer (%#" PRIxLEAST64
2868                                                      ") which has a size of %#" PRIxLEAST64 ".",
2869                                                      reinterpret_cast<const uint64_t &>(set_node->set), i,
2870                                                      dynOffset, pWDS->pBufferInfo[j].offset,
2871                                                      reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2872                                }
2873                            } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) > bufferSize) {
2874                                result |= log_msg(
2875                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2876                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2877                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__, DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW,
2878                                    "DS",
2879                                    "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
2880                                    "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
2881                                    " from its update, this oversteps its buffer "
2882                                    "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
2883                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
2884                                    pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
2885                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
2899                            }
2900                            dynOffsetIndex++;
2901                        }
2902                    } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2903                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2904                            pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2905                        }
2906                    } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2907                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2908                            assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end());
2909                            pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2910                        }
2911                    } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2912                               pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2913                        for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2914                            pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2915                        }
2916                    }
2917                    i += pWDS->descriptorCount - 1; // Advance i to the last descriptor of this update (++i at end of for loop
2918                                                    // will then move one index past the last of these descriptors)
2919                    break;
2920                default: // Currently only shadowing Write update nodes so shouldn't get here
2921                    assert(0);
2922                    continue;
2923                }
2924            }
2925        }
2926    }
2927    return result;
2928}
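
// Worked example of the range check above (hypothetical values): for a 256-byte
// buffer updated with offset 64 and range 128, any dynamic offset greater than 64
// makes (dynOffset + 64 + 128) exceed 256 and logs DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW.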
2929// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
2930//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
2931//   function can be removed and validate_and_update_draw_state() used instead
2932static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
2933    VkWriteDescriptorSet *pWDS = nullptr;
2934    SET_NODE *pSet = nullptr;
2935    // For the bound descriptor sets, pull off any storage images and buffers
2936    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
2937    //  pipelines
2938    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
2939        // Get the set node
2940        pSet = getSetNode(dev_data, set);
2941        // For each update in the set
2942        for (auto pUpdate : pSet->pDescriptorUpdates) {
2943            // If it's a write update to STORAGE type capture image/buffer being updated
2944            if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) {
2945                pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate);
2946                if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
2947                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2948                        pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
2949                    }
2950                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
2951                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2952                        pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
2953                    }
2954                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2955                           pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
2956                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
2957                        pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
2958                    }
2959                }
2960            }
2961        }
2962    }
2963}
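
// Note: this mirrors the STORAGE_* capture in validate_and_update_drawtime_descriptor_state()
// above, but walks every bound compute set rather than only the slots a pipeline actually uses.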
2964
2965// Validate overall state at the time of a draw call
2966static VkBool32 validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const VkBool32 indexedDraw,
2967                                               const VkPipelineBindPoint bindPoint) {
2968    VkBool32 result = VK_FALSE;
2969    auto const &state = pCB->lastBound[bindPoint];
2970    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2971    // First check flag states
2972    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2973        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2974
2975    // Now complete other state checks
2976    // TODO : Currently only performing next check if *something* was bound (non-zero last bound)
2977    //  There is probably a better way to gate when this check happens, and to know if something *should* have been bound
2978    //  We should have that check separately and then gate this check based on that check
2979    //  We should perform that check separately, and then gate this check on its result
2980        if (state.pipelineLayout) {
2981            string errorString;
2982            // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2983            vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
2984            for (auto setBindingPair : pPipe->active_slots) {
2985                uint32_t setIndex = setBindingPair.first;
2986                // If valid set is not bound throw an error
2987                if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2988                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2989                                      __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2990                                      "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.",
2991                                      (uint64_t)pPipe->pipeline, setIndex);
2992                } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
2993                                                            pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2994                    // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2995                    VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
2996                    result |= log_msg(
2997                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2998                        (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2999                        "VkDescriptorSet (%#" PRIxLEAST64
3000                        ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
3001                        (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
3002                } else { // Valid set is bound and layout compatible, validate that it's updated
3003                    // Pull the set node
3004                    SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
3005                    // Save vector of all active sets to verify dynamicOffsets below
3006                    // activeSetNodes.push_back(pSet);
3008                    // Make sure set has been updated
3009                    if (!pSet->pUpdateStructs) {
3010                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3011                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pSet->set, __LINE__,
3012                                          DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3013                                          "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
3014                                                              "this will result in undefined behavior.",
3015                                          (uint64_t)pSet->set);
3016                    }
3017                }
3018            }
3019            // For given active slots, verify any dynamic descriptors and record updated images & buffers
3020            result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
3021        }
3022        if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
3023            // Verify Vtx binding
3024            if (pPipe->vertexBindingDescriptions.size() > 0) {
3025                for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
3026                    if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
3027                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3028                                          __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3029                                          "The Pipeline State Object (%#" PRIxLEAST64
3030                                          ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
3031                                          " should be set via vkCmdBindVertexBuffers.",
3032                                          (uint64_t)state.pipeline, i);
3033                    }
3034                }
3035            } else {
3036                if (!pCB->currentDrawData.buffers.empty()) {
3037                    result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
3038                                      (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3039                                      "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
3040                                      ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
3041                                      (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
3042                }
3043            }
3044            // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
3045            // Skip check if rasterization is disabled or there is no viewport.
3046            if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
3047                 !pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) &&
3048                pPipe->graphicsPipelineCI.pViewportState) {
3049                VkBool32 dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
3050                VkBool32 dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
3051                if (dynViewport) {
3052                    if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
3053                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3054                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3055                                          "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
3056                                          ", but PSO viewportCount is %u. These counts must match.",
3057                                          pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
3058                    }
3059                }
3060                if (dynScissor) {
3061                    if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
3062                        result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3063                                          __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3064                                          "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
3065                                          ", but PSO scissorCount is %u. These counts must match.",
3066                                          pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
3067                    }
3068                }
3069            }
3070        }
3071    }
3072    return result;
3073}
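
// Illustrative: a PSO built with viewportCount == 2 and VK_DYNAMIC_STATE_VIEWPORT
// must see vkCmdSetViewport() record two viewports before the draw; if only one was
// set, the size comparison above logs DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH.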
3074
3075// Verify that create state for a pipeline is valid
3076static VkBool32 verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3077                                          int pipelineIndex) {
3078    VkBool32 skipCall = VK_FALSE;
3079
3080    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3081
3082    // If create derivative bit is set, check that we've specified a base
3083    // pipeline correctly, and that the base pipeline was created to allow
3084    // derivatives.
3085    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3086        PIPELINE_NODE *pBasePipeline = nullptr;
3087        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3088              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3089            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3090                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3091                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3092        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3093            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3094                skipCall |=
3095                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3096                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3097                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3098            } else {
3099                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3100            }
3101        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3102            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3103        }
3104
3105        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3106            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3107                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3108                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3109        }
3110    }
3111
3112    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3113        if (!my_data->physDevProperties.features.independentBlend) {
3114            if (pPipeline->attachments.size() > 1) {
3115                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3116                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3117                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3118                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3119                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3120                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3121                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3122                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3123                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3124                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3125                        skipCall |=
3126                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3127                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If the independent blend feature is "
3128                            "not enabled, all elements of pAttachments must be identical");
3129                    }
3130                }
3131            }
3132        }
3133        if (!my_data->physDevProperties.features.logicOp &&
3134            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3135            skipCall |=
3136                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3137                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3138                        "Invalid Pipeline CreateInfo: If the logic operations feature is not enabled, logicOpEnable must be VK_FALSE");
3139        }
3140        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3141            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3142             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3143            skipCall |=
3144                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3145                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3146                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3147        }
3148    }
3149
3150    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3151    // produces nonsense errors that confuse users. Other layers should already
3152    // emit errors for renderpass being invalid.
3153    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
3154    if (rp_data != my_data->renderPassMap.end() &&
3155        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
3156        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3157                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3158                                                                           "is out of range for this renderpass (0..%u)",
3159                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
3160    }
3161
3162    if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) {
3163        skipCall = VK_TRUE;
3164    }
3165    // VS is required
3166    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3167        skipCall |=
3168            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3169                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3170    }
3171    // Either both or neither TC/TE shaders should be defined
3172    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3173        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3174        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3175                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3176                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3177    }
3178    // Compute shaders should be specified independent of Gfx shaders
3179    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3180        (pPipeline->active_shaders &
3181         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3182          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3183        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3184                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3185                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3186    }
3187    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3188    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3189    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3190        (pPipeline->iaStateCI.topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3191        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3192                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3193                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3194                                                                           "topology for tessellation pipelines");
3195    }
3196    if (pPipeline->iaStateCI.topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3197        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3198            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3199                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3200                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3201                                                                               "topology is only valid for tessellation pipelines");
3202        }
3203        if (!pPipeline->tessStateCI.patchControlPoints || (pPipeline->tessStateCI.patchControlPoints > 32)) {
3204            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3205                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3206                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3207                                                                               "topology used with patchControlPoints value %u."
3208                                                                               " patchControlPoints should be >0 and <=32.",
3209                                pPipeline->tessStateCI.patchControlPoints);
3210        }
3211    }
3212    // Viewport state must be included if rasterization is enabled.
3213    // If the viewport state is included, the viewport and scissor counts should always match.
3214    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3215    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3216        !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3217        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3218            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3219                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3220                                                                           "and scissors are dynamic, the PSO must include "
3221                                                                           "viewportCount and scissorCount in pViewportState.");
3222        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3223                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3224            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3225                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3226                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3227                                pPipeline->vpStateCI.viewportCount, pPipeline->vpStateCI.scissorCount);
3228        } else {
3229            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3230            VkBool32 dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3231            VkBool32 dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3232            if (!dynViewport) {
3233                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3234                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3235                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3236                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3237                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3238                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3239                                        "vkCmdSetViewport().",
3240                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3241                }
3242            }
3243            if (!dynScissor) {
3244                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3245                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3246                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3247                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3248                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3249                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3250                                        "vkCmdSetScissor().",
3251                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3252                }
3253            }
3254        }
3255    }
3256    return skipCall;
3257}
3258
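// Example (illustrative only; not part of the layer): application-side viewport
// state that satisfies the checks above when viewport/scissor are dynamic --
// the counts must still be non-zero and must match:
//     VkPipelineViewportStateCreateInfo vp = {};
//     vp.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
//     vp.viewportCount = 1;  // required even though VK_DYNAMIC_STATE_VIEWPORT is enabled
//     vp.scissorCount = 1;   // must match viewportCount
//     vp.pViewports = NULL;  // OK only because viewport is in pDynamicState
//     vp.pScissors = NULL;   // OK only because scissor is in pDynamicState
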
3259// Init the pipeline mapping info based on pipeline create info LL tree
3260//  Threading note : Calls to this function should be wrapped in a mutex
3261// TODO : this should really just be in the constructor for PIPELINE_NODE
3262static PIPELINE_NODE *initGraphicsPipeline(layer_data *dev_data, const VkGraphicsPipelineCreateInfo *pCreateInfo) {
3263    PIPELINE_NODE *pPipeline = new PIPELINE_NODE;
3264
3265    // First init create info
3266    memcpy(&pPipeline->graphicsPipelineCI, pCreateInfo, sizeof(VkGraphicsPipelineCreateInfo));
3267
3268    size_t bufferSize = 0;
3269    const VkPipelineVertexInputStateCreateInfo *pVICI = NULL;
3270    const VkPipelineColorBlendStateCreateInfo *pCBCI = NULL;
3271
3272    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
3273        const VkPipelineShaderStageCreateInfo *pPSSCI = &pCreateInfo->pStages[i];
3274
3275        switch (pPSSCI->stage) {
3276        case VK_SHADER_STAGE_VERTEX_BIT:
3277            memcpy(&pPipeline->vsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3278            pPipeline->active_shaders |= VK_SHADER_STAGE_VERTEX_BIT;
3279            break;
3280        case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
3281            memcpy(&pPipeline->tcsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3282            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
3283            break;
3284        case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
3285            memcpy(&pPipeline->tesCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3286            pPipeline->active_shaders |= VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
3287            break;
3288        case VK_SHADER_STAGE_GEOMETRY_BIT:
3289            memcpy(&pPipeline->gsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3290            pPipeline->active_shaders |= VK_SHADER_STAGE_GEOMETRY_BIT;
3291            break;
3292        case VK_SHADER_STAGE_FRAGMENT_BIT:
3293            memcpy(&pPipeline->fsCI, pPSSCI, sizeof(VkPipelineShaderStageCreateInfo));
3294            pPipeline->active_shaders |= VK_SHADER_STAGE_FRAGMENT_BIT;
3295            break;
3296        case VK_SHADER_STAGE_COMPUTE_BIT:
3297            // TODO : Flag error, CS is specified through VkComputePipelineCreateInfo
3298            pPipeline->active_shaders |= VK_SHADER_STAGE_COMPUTE_BIT;
3299            break;
3300        default:
3301            // TODO : Flag error
3302            break;
3303        }
3304    }
3305    // Copy over GraphicsPipelineCreateInfo structure embedded pointers
3306    if (pCreateInfo->stageCount != 0) {
3307        pPipeline->graphicsPipelineCI.pStages = new VkPipelineShaderStageCreateInfo[pCreateInfo->stageCount];
3308        bufferSize = pCreateInfo->stageCount * sizeof(VkPipelineShaderStageCreateInfo);
3309        memcpy((void *)pPipeline->graphicsPipelineCI.pStages, pCreateInfo->pStages, bufferSize);
3310    }
3311    if (pCreateInfo->pVertexInputState != NULL) {
3312        pPipeline->vertexInputCI = *pCreateInfo->pVertexInputState;
3313        // Copy embedded ptrs
3314        pVICI = pCreateInfo->pVertexInputState;
3315        if (pVICI->vertexBindingDescriptionCount) {
3316            pPipeline->vertexBindingDescriptions = std::vector<VkVertexInputBindingDescription>(
3317                pVICI->pVertexBindingDescriptions, pVICI->pVertexBindingDescriptions + pVICI->vertexBindingDescriptionCount);
3318        }
3319        if (pVICI->vertexAttributeDescriptionCount) {
3320            pPipeline->vertexAttributeDescriptions = std::vector<VkVertexInputAttributeDescription>(
3321                pVICI->pVertexAttributeDescriptions, pVICI->pVertexAttributeDescriptions + pVICI->vertexAttributeDescriptionCount);
3322        }
3323        pPipeline->graphicsPipelineCI.pVertexInputState = &pPipeline->vertexInputCI;
3324    }
3325    if (pCreateInfo->pInputAssemblyState != NULL) {
3326        pPipeline->iaStateCI = *pCreateInfo->pInputAssemblyState;
3327        pPipeline->graphicsPipelineCI.pInputAssemblyState = &pPipeline->iaStateCI;
3328    }
3329    if (pCreateInfo->pTessellationState != NULL) {
3330        pPipeline->tessStateCI = *pCreateInfo->pTessellationState;
3331        pPipeline->graphicsPipelineCI.pTessellationState = &pPipeline->tessStateCI;
3332    }
3333    if (pCreateInfo->pViewportState != NULL) {
3334        pPipeline->vpStateCI = *pCreateInfo->pViewportState;
3335        pPipeline->graphicsPipelineCI.pViewportState = &pPipeline->vpStateCI;
3336    }
3337    if (pCreateInfo->pRasterizationState != NULL) {
3338        pPipeline->rsStateCI = *pCreateInfo->pRasterizationState;
3339        pPipeline->graphicsPipelineCI.pRasterizationState = &pPipeline->rsStateCI;
3340    }
3341    if (pCreateInfo->pMultisampleState != NULL) {
3342        pPipeline->msStateCI = *pCreateInfo->pMultisampleState;
3343        pPipeline->graphicsPipelineCI.pMultisampleState = &pPipeline->msStateCI;
3344    }
3345    if (pCreateInfo->pDepthStencilState != NULL) {
3346        pPipeline->dsStateCI = *pCreateInfo->pDepthStencilState;
3347        pPipeline->graphicsPipelineCI.pDepthStencilState = &pPipeline->dsStateCI;
3348    }
3349    if (pCreateInfo->pColorBlendState != NULL) {
3350        pPipeline->cbStateCI = *pCreateInfo->pColorBlendState;
3351        // Copy embedded ptrs
3352        pCBCI = pCreateInfo->pColorBlendState;
3353        if (pCBCI->attachmentCount) {
3354            pPipeline->attachments = std::vector<VkPipelineColorBlendAttachmentState>(
3355                pCBCI->pAttachments, pCBCI->pAttachments + pCBCI->attachmentCount);
3356        }
3357        pPipeline->graphicsPipelineCI.pColorBlendState = &pPipeline->cbStateCI;
3358    }
3359    if (pCreateInfo->pDynamicState != NULL) {
3360        pPipeline->dynStateCI = *pCreateInfo->pDynamicState;
3361        if (pPipeline->dynStateCI.dynamicStateCount) {
3362            pPipeline->dynStateCI.pDynamicStates = new VkDynamicState[pPipeline->dynStateCI.dynamicStateCount];
3363            bufferSize = pPipeline->dynStateCI.dynamicStateCount * sizeof(VkDynamicState);
3364            memcpy((void *)pPipeline->dynStateCI.pDynamicStates, pCreateInfo->pDynamicState->pDynamicStates, bufferSize);
3365        }
3366        pPipeline->graphicsPipelineCI.pDynamicState = &pPipeline->dynStateCI;
3367    }
3368    return pPipeline;
3369}
3370
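// Example (illustrative only): why the deep copies above are required. The
// application may free or reuse its create-info arrays as soon as
// vkCreateGraphicsPipelines() returns, so the PIPELINE_NODE shadow copy must
// own its own storage ('stages' and 'BuildStages' here are hypothetical):
//     std::vector<VkPipelineShaderStageCreateInfo> stages = BuildStages();
//     VkGraphicsPipelineCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
//     ci.stageCount = static_cast<uint32_t>(stages.size());
//     ci.pStages = stages.data();
//     vkCreateGraphicsPipelines(device, pipelineCache, 1, &ci, NULL, &pipeline);
//     stages.clear(); // legal; the layer's shadow copy must remain valid
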
3371// Free the Pipeline nodes
3372static void deletePipelines(layer_data *my_data) {
3373    if (my_data->pipelineMap.empty())
3374        return;
3375    for (auto ii = my_data->pipelineMap.begin(); ii != my_data->pipelineMap.end(); ++ii) {
3376        if ((*ii).second->graphicsPipelineCI.stageCount != 0) {
3377            delete[](*ii).second->graphicsPipelineCI.pStages;
3378        }
3379        if ((*ii).second->dynStateCI.dynamicStateCount != 0) {
3380            delete[](*ii).second->dynStateCI.pDynamicStates;
3381        }
3382        delete (*ii).second;
3383    }
3384    my_data->pipelineMap.clear();
3385}
3386
3387// For given pipeline, return number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
3388static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
3389    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
3390    if (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->msStateCI.sType) {
3391        return pPipe->msStateCI.rasterizationSamples;
3392    }
3393    return VK_SAMPLE_COUNT_1_BIT;
3394}
3395
3396// Validate state related to the PSO
3397static VkBool32 validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
3398                                      const VkPipeline pipeline) {
3399    VkBool32 skipCall = VK_FALSE;
3400    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
3401        // Verify that any MSAA request in PSO matches sample# in bound FB
3402        // Skip the check if rasterization is disabled.
3403        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
3404        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3405            !pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable) {
3406            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
3407            if (pCB->activeRenderPass) {
3408                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
3409                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
3410                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
3411                uint32_t i;
3412
3413                if (pPipeline->cbStateCI.attachmentCount != pSD->colorAttachmentCount) {
3414                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3415                                        reinterpret_cast<const uint64_t &>(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3416                                        "Mismatch between blend state attachment count %u and subpass %u color attachment "
3417                                        "count %u!  These must be the same.",
3418                                        pPipeline->cbStateCI.attachmentCount, pCB->activeSubpass, pSD->colorAttachmentCount);
3419                }
3420
3421                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3422                    VkSampleCountFlagBits samples;
3423
3424                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3425                        continue;
3426
3427                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3428                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3429                        subpassNumSamples = samples;
3430                    } else if (subpassNumSamples != samples) {
3431                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3432                        break;
3433                    }
3434                }
3435                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3436                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3437                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3438                        subpassNumSamples = samples;
3439                    else if (subpassNumSamples != samples)
3440                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3441                }
3442
3443                if (psoNumSamples != subpassNumSamples) {
3444                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3445                                        (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3446                                        "Num samples mismatch! Binding PSO (%#" PRIxLEAST64
3447                                        ") with %u samples while current RenderPass (%#" PRIxLEAST64 ") w/ %u samples!",
3448                                        (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3449                }
3450            } else {
3451                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3452                //   Verify and flag error as appropriate
3453            }
3454        }
3455        // TODO : Add more checks here
3456    } else {
3457        // TODO : Validate non-gfx pipeline updates
3458    }
3459    return skipCall;
3460}
3461
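// Example (illustrative only): PSO multisample state that passes the check
// above for a subpass whose color and depth/stencil attachments were all
// created with VK_SAMPLE_COUNT_4_BIT:
//     VkPipelineMultisampleStateCreateInfo ms = {};
//     ms.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
//     ms.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; // must match the subpass attachments
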
3462// The block of code below is dedicated to managing/tracking descriptor sets (DSs)
3463
3464// Return Pool node ptr for specified pool or else NULL
3465static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3466    if (my_data->descriptorPoolMap.find(pool) == my_data->descriptorPoolMap.end()) {
3467        return NULL;
3468    }
3469    return my_data->descriptorPoolMap[pool];
3470}
3471
3472static LAYOUT_NODE *getLayoutNode(layer_data *my_data, const VkDescriptorSetLayout layout) {
3473    if (my_data->descriptorSetLayoutMap.find(layout) == my_data->descriptorSetLayoutMap.end()) {
3474        return NULL;
3475    }
3476    return my_data->descriptorSetLayoutMap[layout];
3477}
3478
3479// Return VK_FALSE if update struct is of valid type, otherwise flag error and return code from callback
3480static VkBool32 validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3481    switch (pUpdateStruct->sType) {
3482    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3483    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3484        return VK_FALSE;
3485    default:
3486        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3487                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3488                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3489                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3490    }
3491}
3492
3493// Return the descriptor count for the given update struct
3494// Returns 0 for unrecognized update struct types (TODO : flag an error in that case)
3495static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3496    switch (pUpdateStruct->sType) {
3497    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3498        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3499    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3500        // TODO : Need to understand this case better and make sure code is correct
3501        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3502    default:
3503        break;
3504    }
3505    return 0;
3506}
3507
3508// For given layout and update, return the first overall index of the layout that is updated
3509static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3510                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3511    return getBindingStartIndex(pLayout, binding) + arrayIndex;
3512}
3513
3514// For given layout and update, return the last overall index of the layout that is updated
3515static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout, const uint32_t binding,
3516                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3517    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3518    return getBindingStartIndex(pLayout, binding) + arrayIndex + count - 1;
3519}
3520
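// Worked example for the two index helpers above : if binding 2 of a layout
// begins at overall index 5 (i.e. getBindingStartIndex() returns 5), then a
// write update with dstArrayElement=1 and descriptorCount=3 covers overall
// indices 6 through 8 (start = 5+1 = 6, end = 5+1+3-1 = 8).
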
3521// Verify that the descriptor type in the update struct matches what's expected by the layout
3522static VkBool32 validateUpdateConsistency(layer_data *my_data, const VkDevice device, const LAYOUT_NODE *pLayout,
3523                                          const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3524    // First get actual type of update
3525    VkBool32 skipCall = VK_FALSE;
3526    VkDescriptorType actualType;
3527    uint32_t i = 0;
3528    switch (pUpdateStruct->sType) {
3529    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3530        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3531        break;
3532    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3533        /* no need to validate */
3534        return VK_FALSE;
3536    default:
3537        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3538                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3539                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3540                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3541    }
3542    if (VK_FALSE == skipCall) {
3543        // Set first stageFlags as reference and verify that all other updates match it
3544        VkShaderStageFlags refStageFlags = pLayout->stageFlags[startIndex];
3545        for (i = startIndex; i <= endIndex; i++) {
3546            if (pLayout->descriptorTypes[i] != actualType) {
3547                skipCall |= log_msg(
3548                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3549                    DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3550                    "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3551                    string_VkDescriptorType(actualType), string_VkDescriptorType(pLayout->descriptorTypes[i]));
3552            }
3553            if (pLayout->stageFlags[i] != refStageFlags) {
3554                skipCall |= log_msg(
3555                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3556                    DRAWSTATE_DESCRIPTOR_STAGEFLAGS_MISMATCH, "DS",
3557                    "Write descriptor update has stageFlags %x that do not match overlapping binding descriptor stageFlags of %x!",
3558                    refStageFlags, pLayout->stageFlags[i]);
3559            }
3560        }
3561    }
3562    return skipCall;
3563}
3564
3565// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3566//   struct into the pNewNode param. Return VK_TRUE if error condition encountered and callback signals early exit.
3567// NOTE : Calls to this function should be wrapped in mutex
3568static VkBool32 shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3569    VkBool32 skipCall = VK_FALSE;
3570    VkWriteDescriptorSet *pWDS = NULL;
3571    VkCopyDescriptorSet *pCDS = NULL;
3572    switch (pUpdate->sType) {
3573    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3574        pWDS = new VkWriteDescriptorSet;
3575        *pNewNode = (GENERIC_HEADER *)pWDS;
3576        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3577
3578        switch (pWDS->descriptorType) {
3579        case VK_DESCRIPTOR_TYPE_SAMPLER:
3580        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3581        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3582        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3583            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3584            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3585            pWDS->pImageInfo = info;
3586        } break;
3587        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3588        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3589            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3590            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3591            pWDS->pTexelBufferView = info;
3592        } break;
3593        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3594        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3595        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3596        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3597            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3598            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3599            pWDS->pBufferInfo = info;
3600        } break;
3601        default:
3602            return VK_TRUE; // unexpected descriptorType; this function returns VkBool32, not a VkResult
3604        }
3605        break;
3606    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3607        pCDS = new VkCopyDescriptorSet;
3608        *pNewNode = (GENERIC_HEADER *)pCDS;
3609        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3610        break;
3611    default:
3612        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3613                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3614                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3615                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3616            return VK_TRUE;
3617    }
3618    // Make sure that pNext for the end of shadow copy is NULL
3619    (*pNewNode)->pNext = NULL;
3620    return skipCall;
3621}
3622
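// Note on the shadowing above (illustrative): the deep copies of pImageInfo /
// pBufferInfo / pTexelBufferView are what let the layer keep a valid record
// after the call returns -- an application may legally reuse one scratch
// array for many updates ('scratch' here is hypothetical):
//     VkDescriptorImageInfo scratch[4];
//     // ...fill scratch, call vkUpdateDescriptorSets(), then overwrite
//     // scratch for the next update; the layer's shadow copy is unaffected.
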
3623// Verify that given sampler is valid
3624static VkBool32 validateSampler(const layer_data *my_data, const VkSampler *pSampler, const VkBool32 immutable) {
3625    VkBool32 skipCall = VK_FALSE;
3626    auto sampIt = my_data->sampleMap.find(*pSampler);
3627    if (sampIt == my_data->sampleMap.end()) {
3628        if (!immutable) {
3629            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3630                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3631                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3632                                (uint64_t)*pSampler);
3633        } else { // immutable
3634            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3635                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3636                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3637                                "sampler %#" PRIxLEAST64,
3638                                (uint64_t)*pSampler);
3639        }
3640    } else {
3641        // TODO : Any further checks we want to do on the sampler?
3642    }
3643    return skipCall;
3644}
3645
3646// find layout(s) on the cmd buf level
3647bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3648    ImageSubresourcePair imgpair = {image, true, range};
3649    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3650    if (imgsubIt == pCB->imageLayoutMap.end()) {
3651        imgpair = {image, false, VkImageSubresource()};
3652        imgsubIt = pCB->imageLayoutMap.find(imgpair);
3653        if (imgsubIt == pCB->imageLayoutMap.end())
3654            return false;
3655    }
3656    node = imgsubIt->second;
3657    return true;
3658}
3659
3660// find layout(s) on the global level
3661bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3662    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3663    if (imgsubIt == my_data->imageLayoutMap.end()) {
3664        imgpair = {imgpair.image, false, VkImageSubresource()};
3665        imgsubIt = my_data->imageLayoutMap.find(imgpair);
3666        if (imgsubIt == my_data->imageLayoutMap.end())
3667            return false;
3668    }
3669    layout = imgsubIt->second.layout;
3670    return true;
3671}
3672
3673bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3674    ImageSubresourcePair imgpair = {image, true, range};
3675    return FindLayout(my_data, imgpair, layout);
3676}
3677
3678bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3679    auto sub_data = my_data->imageSubresourceMap.find(image);
3680    if (sub_data == my_data->imageSubresourceMap.end())
3681        return false;
3682    auto imgIt = my_data->imageMap.find(image);
3683    if (imgIt == my_data->imageMap.end())
3684        return false;
3685    bool ignoreGlobal = false;
3686    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
3687    // potential errors in this case.
3688    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3689        ignoreGlobal = true;
3690    }
3691    for (auto imgsubpair : sub_data->second) {
3692        if (ignoreGlobal && !imgsubpair.hasSubresource)
3693            continue;
3694        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3695        if (img_data != my_data->imageLayoutMap.end()) {
3696            layouts.push_back(img_data->second.layout);
3697        }
3698    }
3699    return true;
3700}
3701
3702// Set the layout on the global level
3703void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3704    VkImage &image = imgpair.image;
3705    // TODO (mlentine): Maybe set format if new? Not used atm.
3706    my_data->imageLayoutMap[imgpair].layout = layout;
3707    // TODO (mlentine): Maybe make vector a set?
3708    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3709    if (subresource == my_data->imageSubresourceMap[image].end()) {
3710        my_data->imageSubresourceMap[image].push_back(imgpair);
3711    }
3712}
3713
3714// Set the layout on the cmdbuf level
3715void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3716    pCB->imageLayoutMap[imgpair] = node;
3717    // TODO (mlentine): Maybe make vector a set?
3718    auto subresource =
3719        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3720    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3721        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3722    }
3723}
3724
3725void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3726    // TODO (mlentine): Maybe make vector a set?
3727    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3728        pCB->imageSubresourceMap[imgpair.image].end()) {
3729        pCB->imageLayoutMap[imgpair].layout = layout;
3730    } else {
3731        // TODO (mlentine): Could be expensive and might need to be removed.
3732        assert(imgpair.hasSubresource);
3733        IMAGE_CMD_BUF_LAYOUT_NODE node;
3734        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3735            node.initialLayout = layout;
3736        }
3737        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3738    }
3739}
3740
3741template <class OBJECT, class LAYOUT>
3742void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3743    if (imgpair.subresource.aspectMask & aspectMask) {
3744        imgpair.subresource.aspectMask = aspectMask;
3745        SetLayout(pObject, imgpair, layout);
3746    }
3747}
3748
3749template <class OBJECT, class LAYOUT>
3750void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3751    ImageSubresourcePair imgpair = {image, true, range};
3752    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3753    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3754    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3755    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3756}
3757
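// Worked example for the aspect expansion above : for a range whose aspectMask
// is VK_IMAGE_ASPECT_COLOR_BIT, only the COLOR call survives the
// 'imgpair.subresource.aspectMask & aspectMask' filter; the DEPTH, STENCIL,
// and METADATA calls are no-ops.
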
3758template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3759    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3760    SetLayout(pObject, imgpair, layout);
3761}
3762
3763void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3764    auto image_view_data = dev_data->imageViewMap.find(imageView);
3765    assert(image_view_data != dev_data->imageViewMap.end());
3766    const VkImage &image = image_view_data->second.image;
3767    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3768    // TODO: Do not iterate over every possibility - consolidate where possible
3769    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3770        uint32_t level = subRange.baseMipLevel + j;
3771        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3772            uint32_t layer = subRange.baseArrayLayer + k;
3773            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3774            SetLayout(pCB, image, sub, layout);
3775        }
3776    }
3777}
3778
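// Worked example for the loop above : a view whose subresourceRange has
// baseMipLevel=1, levelCount=2, baseArrayLayer=0, layerCount=3 records six
// (level, layer) pairs : (1,0) (1,1) (1,2) (2,0) (2,1) (2,2).
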
3779// Verify that given imageView is valid
3780static VkBool32 validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3781    VkBool32 skipCall = VK_FALSE;
3782    auto ivIt = my_data->imageViewMap.find(*pImageView);
3783    if (ivIt == my_data->imageViewMap.end()) {
3784        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3785                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3786                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3787                            (uint64_t)*pImageView);
3788    } else {
3789        // Validate that imageLayout is compatible with aspectMask and image format
3790        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3791        VkImage image = ivIt->second.image;
3792        // TODO : Check here in case we have a bad image
3793        VkFormat format = VK_FORMAT_MAX_ENUM;
3794        auto imgIt = my_data->imageMap.find(image);
3795        if (imgIt != my_data->imageMap.end()) {
3796            format = (*imgIt).second.createInfo.format;
3797        } else {
3798            // Also need to check the swapchains.
3799            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3800            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3801                VkSwapchainKHR swapchain = swapchainIt->second;
3802                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3803                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3804                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3805                    format = pswapchain_node->createInfo.imageFormat;
3806                }
3807            }
3808        }
3809        if (format == VK_FORMAT_MAX_ENUM) {
3810            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3811                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3812                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3813                                " in imageView %#" PRIxLEAST64,
3814                                (uint64_t)image, (uint64_t)*pImageView);
3815        } else {
3816            VkBool32 ds = vk_format_is_depth_or_stencil(format);
3817            switch (imageLayout) {
3818            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3819                // Only Color bit must be set
3820                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3821                    skipCall |=
3822                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3823                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3824                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3825                                "and imageView %#" PRIxLEAST64 ""
3826                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3827                                (uint64_t)*pImageView);
3828                }
3829                // format must NOT be DS
3830                if (ds) {
3831                    skipCall |=
3832                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3833                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3834                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3835                                "and imageView %#" PRIxLEAST64 ""
3836                                " but the image format is %s which is not a color format.",
3837                                (uint64_t)*pImageView, string_VkFormat(format));
3838                }
3839                break;
3840            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3841            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3842                // Depth or stencil bit must be set, but both must NOT be set
3843                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3844                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3845                        // both must NOT be set
3846                        skipCall |=
3847                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3848                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3849                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3850                                    " that has both STENCIL and DEPTH aspects set",
3851                                    (uint64_t)*pImageView);
3852                    }
3853                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3854                    // Neither were set
3855                    skipCall |=
3856                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3857                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3858                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3859                                " that does not have STENCIL or DEPTH aspect set.",
3860                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3861                }
3862                // format must be DS
3863                if (!ds) {
3864                    skipCall |=
3865                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3866                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3867                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3868                                " but the image format is %s which is not a depth/stencil format.",
3869                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3870                }
3871                break;
3872            default:
3873                // anything to check for other layouts?
3874                break;
3875            }
3876        }
3877    }
3878    return skipCall;
3879}
3880
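// Example (illustrative only; 'depthView' is hypothetical): a descriptor
// image info that satisfies the aspect/layout/format rules checked above:
//     VkDescriptorImageInfo info = {};
//     info.imageView = depthView; // view created with only VK_IMAGE_ASPECT_DEPTH_BIT
//     info.imageLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; // DS layout requires a DS format
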
3881// Verify that given bufferView is valid
3882static VkBool32 validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3883    VkBool32 skipCall = VK_FALSE;
3884    auto viewIt = my_data->bufferViewMap.find(*pBufferView);
3885    if (viewIt == my_data->bufferViewMap.end()) {
3886        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3887                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3888                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3889                            (uint64_t)*pBufferView);
3890    } else {
3891        // TODO : Any further checks we want to do on the bufferView?
3892    }
3893    return skipCall;
3894}
3895
3896// Verify that given bufferInfo is valid
3897static VkBool32 validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3898    VkBool32 skipCall = VK_FALSE;
3899    auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
3900    if (bufferIt == my_data->bufferMap.end()) {
3901        skipCall |=
3902            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3903                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3904                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3905                    (uint64_t)pBufferInfo->buffer);
3906    } else {
3907        // TODO : Any further checks we want to do on the buffer?
3908    }
3909    return skipCall;
3910}
3911
3912static VkBool32 validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3913                                       const VkDescriptorSetLayoutBinding *pLayoutBinding) {
3914    VkBool32 skipCall = VK_FALSE;
3915    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3916    const VkSampler *pSampler = NULL;
3917    VkBool32 immutable = VK_FALSE;
3918    uint32_t i = 0;
3919    // For given update type, verify that update contents are correct
3920    switch (pWDS->descriptorType) {
3921    case VK_DESCRIPTOR_TYPE_SAMPLER:
3922        for (i = 0; i < pWDS->descriptorCount; ++i) {
3923            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3924        }
3925        break;
3926    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3927        for (i = 0; i < pWDS->descriptorCount; ++i) {
3928            if (NULL == pLayoutBinding->pImmutableSamplers) {
3929                pSampler = &(pWDS->pImageInfo[i].sampler);
3930                if (immutable) {
3931                    skipCall |= log_msg(
3932                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3933                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3934                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3935                        ", but previous update(s) from this "
3936                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3937                        "use immutable or non-immutable samplers.",
3938                        i, (uint64_t)*pSampler);
3939                }
3940            } else {
3941                if (i > 0 && !immutable) {
3942                    skipCall |= log_msg(
3943                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3944                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3945                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3946                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3947                        "use immutable or non-immutable samplers.",
3948                        i);
3949                }
3950                immutable = VK_TRUE;
3951                pSampler = &(pLayoutBinding->pImmutableSamplers[i]);
3952            }
3953            skipCall |= validateSampler(my_data, pSampler, immutable);
3954        }
3955    // Intentionally fall through here to also validate image stuff
3956    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3957    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3958    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3959        for (i = 0; i < pWDS->descriptorCount; ++i) {
3960            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3961        }
3962        break;
3963    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3964    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3965        for (i = 0; i < pWDS->descriptorCount; ++i) {
3966            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3967        }
3968        break;
3969    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3970    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3971    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3972    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3973        for (i = 0; i < pWDS->descriptorCount; ++i) {
3974            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3975        }
3976        break;
3977    default:
3978        break;
3979    }
3980    return skipCall;
3981}
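
// Example (illustrative only; 'set' and 'bufInfo' are hypothetical): a write
// update whose contents pass the per-type checks above:
//     VkWriteDescriptorSet write = {};
//     write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     write.dstSet = set;
//     write.dstBinding = 0;
//     write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
//     write.descriptorCount = 1;
//     write.pBufferInfo = &bufInfo; // buffer path is checked by validateBufferInfo()
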
3982// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3983// func_str is the name of the calling function
3984// Return VK_FALSE if no errors occur
3985// Return VK_TRUE if validation error occurs and callback returns VK_TRUE (to skip upcoming API call down the chain)
3986VkBool32 validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3987    VkBool32 skip_call = VK_FALSE;
3988    auto set_node = my_data->setMap.find(set);
3989    if (set_node == my_data->setMap.end()) {
3990        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3991                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3992                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3993                             (uint64_t)(set));
3994    } else {
3995        if (set_node->second->in_use.load()) {
3996            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3997                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3998                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3999                                 func_str.c_str(), (uint64_t)(set));
4000        }
4001    }
4002    return skip_call;
4003}
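
// Example (illustrative only; 'submitFence' and 'write' are hypothetical):
// waiting for the GPU before updating avoids the in-use error above:
//     vkWaitForFences(device, 1, &submitFence, VK_TRUE, UINT64_MAX);
//     vkUpdateDescriptorSets(device, 1, &write, 0, NULL); // set is now idle
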
4004static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
4005    // Flag any CBs this set is bound to as INVALID
4006    for (auto cb : pSet->boundCmdBuffers) {
4007        auto cb_node = dev_data->commandBufferMap.find(cb);
4008        if (cb_node != dev_data->commandBufferMap.end()) {
4009            cb_node->second->state = CB_INVALID;
4010        }
4011    }
4012}
4013// update DS mappings based on write and copy update arrays
4014static VkBool32 dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
4015                         uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
4016    VkBool32 skipCall = VK_FALSE;
4017
4018    LAYOUT_NODE *pLayout = NULL;
4019    VkDescriptorSetLayoutCreateInfo *pLayoutCI = NULL;
4020    // Validate Write updates
4021    uint32_t i = 0;
4022    for (i = 0; i < descriptorWriteCount; i++) {
4023        VkDescriptorSet ds = pWDS[i].dstSet;
4024        SET_NODE *pSet = my_data->setMap[ds];
4025        // Set being updated cannot be in-flight
4026        if (VK_TRUE == validateIdleDescriptorSet(my_data, ds, "VkUpdateDescriptorSets"))
4027            return VK_TRUE;
4028        // If set is bound to any cmdBuffers, mark them invalid
4029        invalidateBoundCmdBuffers(my_data, pSet);
4030        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
4031        pLayout = pSet->pLayout;
4032        // First verify valid update struct
4033        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == VK_TRUE) {
4034            break;
4035        }
4036        uint32_t binding = 0, endIndex = 0;
4037        binding = pWDS[i].dstBinding;
4038        auto bindingToIndex = pLayout->bindingToIndexMap.find(binding);
4039        // Make sure that layout being updated has the binding being updated
4040        if (bindingToIndex == pLayout->bindingToIndexMap.end()) {
4041            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4042                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4043                                "Descriptor Set %" PRIu64 " does not have a binding to match "
4044                                "update binding %u for update type "
4045                                "%s!",
4046                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
4047        } else {
4048            // Next verify that update falls within size of given binding
4049            endIndex = getUpdateEndIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4050            if (getBindingEndIndex(pLayout, binding) < endIndex) {
4051                pLayoutCI = &pLayout->createInfo;
4052                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4053                skipCall |=
4054                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4055                            (uint64_t)(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4056                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4057                            string_VkStructureType(pUpdate->sType), binding, DSstr.c_str());
4058            } else { // TODO : should we skip update on a type mismatch or force it?
4059                uint32_t startIndex;
4060                startIndex = getUpdateStartIndex(my_data, device, pLayout, binding, pWDS[i].dstArrayElement, pUpdate);
4061                // Layout bindings match w/ update, now verify that update type
4062                // & stageFlags are the same for entire update
4063                if ((skipCall = validateUpdateConsistency(my_data, device, pLayout, pUpdate, startIndex, endIndex)) == VK_FALSE) {
4064                    // The update is within bounds and consistent, but need to
4065                    // make sure contents make sense as well
4066                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i],
4067                                                           &pLayout->createInfo.pBindings[bindingToIndex->second])) == VK_FALSE) {
4068                        // Update is good. Save the update info
4069                        // Create new update struct for this set's shadow copy
4070                        GENERIC_HEADER *pNewNode = NULL;
4071                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
4072                        if (NULL == pNewNode) {
4073                            skipCall |= log_msg(
4074                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4075                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
4076                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
4077                        } else {
4078                            // Insert shadow node into LL of updates for this set
4079                            pNewNode->pNext = pSet->pUpdateStructs;
4080                            pSet->pUpdateStructs = pNewNode;
4081                            // Now update appropriate descriptor(s) to point to new Update node
4082                            for (uint32_t j = startIndex; j <= endIndex; j++) {
4083                                assert(j < pSet->descriptorCount);
4084                                pSet->pDescriptorUpdates[j] = pNewNode;
4085                            }
4086                        }
4087                    }
4088                }
4089            }
4090        }
4091    }
4092    // Now validate copy updates
4093    for (i = 0; i < descriptorCopyCount; ++i) {
4094        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
4095        LAYOUT_NODE *pSrcLayout = NULL, *pDstLayout = NULL;
4096        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
4097        // For each copy make sure that update falls within given layout and that types match
4098        pSrcSet = my_data->setMap[pCDS[i].srcSet];
4099        pDstSet = my_data->setMap[pCDS[i].dstSet];
4100        // Set being updated cannot be in-flight
4101        if (VK_TRUE == validateIdleDescriptorSet(my_data, pDstSet->set, "VkUpdateDescriptorSets"))
4102            return VK_TRUE;
4103        invalidateBoundCmdBuffers(my_data, pDstSet);
4104        pSrcLayout = pSrcSet->pLayout;
4105        pDstLayout = pDstSet->pLayout;
4106        // Validate that src binding is valid for src set layout
4107        if (pSrcLayout->bindingToIndexMap.find(pCDS[i].srcBinding) == pSrcLayout->bindingToIndexMap.end()) {
4108            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4109                                (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4110                                "Copy descriptor update %u has srcBinding %u "
4111                                "which is out of bounds for underlying SetLayout "
4112                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4113                                i, pCDS[i].srcBinding, (uint64_t)pSrcLayout->layout, pSrcLayout->createInfo.bindingCount - 1);
4114        } else if (pDstLayout->bindingToIndexMap.find(pCDS[i].dstBinding) == pDstLayout->bindingToIndexMap.end()) {
4115            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4116                                (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
4117                                "Copy descriptor update %u has dstBinding %u "
4118                                "which is out of bounds for underlying SetLayout "
4119                                "%#" PRIxLEAST64 " which only has bindings 0-%u.",
4120                                i, pCDS[i].dstBinding, (uint64_t)pDstLayout->layout, pDstLayout->createInfo.bindingCount - 1);
4121        } else {
4122            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout
4123            srcEndIndex = getUpdateEndIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4124                                            (const GENERIC_HEADER *)&(pCDS[i]));
4125            dstEndIndex = getUpdateEndIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4126                                            (const GENERIC_HEADER *)&(pCDS[i]));
4127            if (getBindingEndIndex(pSrcLayout, pCDS[i].srcBinding) < srcEndIndex) {
4128                pLayoutCI = &pSrcLayout->createInfo;
4129                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4130                skipCall |=
4131                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4132                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4133                            "Copy descriptor src update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4134                            pCDS[i].srcBinding, DSstr.c_str());
4135            } else if (getBindingEndIndex(pDstLayout, pCDS[i].dstBinding) < dstEndIndex) {
4136                pLayoutCI = &pDstLayout->createInfo;
4137                string DSstr = vk_print_vkdescriptorsetlayoutcreateinfo(pLayoutCI, "{DS}    ");
4138                skipCall |=
4139                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4140                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
4141                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout w/ CI:\n%s!",
4142                            pCDS[i].dstBinding, DSstr.c_str());
4143            } else {
4144                srcStartIndex = getUpdateStartIndex(my_data, device, pSrcLayout, pCDS[i].srcBinding, pCDS[i].srcArrayElement,
4145                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4146                dstStartIndex = getUpdateStartIndex(my_data, device, pDstLayout, pCDS[i].dstBinding, pCDS[i].dstArrayElement,
4147                                                    (const GENERIC_HEADER *)&(pCDS[i]));
4148                for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
4149                    // For copy just make sure that the types match and then perform the update
4150                    if (pSrcLayout->descriptorTypes[srcStartIndex + j] != pDstLayout->descriptorTypes[dstStartIndex + j]) {
4151                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4152                                            __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
4153                                            "Copy descriptor update index %u, update count #%u, has src update descriptor type %s "
4154                                            "that does not match overlapping dest descriptor type of %s!",
4155                                            i, j + 1, string_VkDescriptorType(pSrcLayout->descriptorTypes[srcStartIndex + j]),
4156                                            string_VkDescriptorType(pDstLayout->descriptorTypes[dstStartIndex + j]));
4157                    } else {
4158                        // point dst descriptor at corresponding src descriptor
4159                        // TODO : This may be a hole. I believe copy should be its own copy,
4160                        //  otherwise a subsequent write update to src will incorrectly affect the copy
4161                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
4162                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
4163                    }
4164                }
4165            }
4166        }
4167    }
4168    return skipCall;
4169}
4170
4171// Verify that given pool has descriptors that are being requested for allocation.
4172// NOTE : Calls to this function should be wrapped in mutex
4173static VkBool32 validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
4174                                                         const VkDescriptorSetLayout *pSetLayouts) {
4175    VkBool32 skipCall = VK_FALSE;
4176    uint32_t i = 0;
4177    uint32_t j = 0;
4178
4179    // Check that this pool has enough descriptorSets remaining, and reserve them
4180    if (pPoolNode->availableSets < count) {
4181        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4182                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4183                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
4184                            ". This pool only has %u descriptorSets remaining.",
4185                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
4186    } else {
4187        pPoolNode->availableSets -= count;
4188    }
4189
4190    for (i = 0; i < count; ++i) {
4191        LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pSetLayouts[i]);
4192        if (NULL == pLayout) {
4193            skipCall |=
4194                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
4195                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
4196                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
4197                        (uint64_t)pSetLayouts[i]);
4198        } else {
4199            uint32_t typeIndex = 0, poolSizeCount = 0;
4200            for (j = 0; j < pLayout->createInfo.bindingCount; ++j) {
4201                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
4202                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
4203                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
4204                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4205                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pLayout->layout, __LINE__,
4206                                        DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
4207                                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
4208                                        ". This pool only has %u descriptors of this type remaining.",
4209                                        poolSizeCount, string_VkDescriptorType(pLayout->createInfo.pBindings[j].descriptorType),
4210                                        (uint64_t)pPoolNode->pool, pPoolNode->availableDescriptorTypeCount[typeIndex]);
4211                } else { // Decrement available descriptors of this type
4212                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4213                }
4214            }
4215        }
4216    }
4217    return skipCall;
4218}
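
// Illustrative application-side sketch (not part of this layer): a pool created
// with maxSets == 1 trips the availableSets check above when a second set is requested:
//     VkDescriptorSetAllocateInfo alloc_info = {};
//     alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
//     alloc_info.descriptorPool = pool;   // assume: pool created with maxSets == 1
//     alloc_info.descriptorSetCount = 2;  // 2 > 1 -> DRAWSTATE_DESCRIPTOR_POOL_EMPTY
//     alloc_info.pSetLayouts = layouts;   // assume: array of 2 compatible layouts
//     vkAllocateDescriptorSets(device, &alloc_info, sets);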
4219
4220// Free the shadowed update node for this Set
4221// NOTE : Calls to this function should be wrapped in mutex
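// The nodes freed here are heap copies made when write/copy updates were recorded
// against the set: each shadowed VkWriteDescriptorSet owns deep copies of its
// pImageInfo / pBufferInfo / pTexelBufferView arrays, which is why the deletion
// below switches on descriptorType to release the matching array.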
4222static void freeShadowUpdateTree(SET_NODE *pSet) {
4223    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4224    pSet->pUpdateStructs = NULL;
4225    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4226    // Clear the descriptor mappings as they will now be invalid
4227    pSet->pDescriptorUpdates.clear();
4228    while (pShadowUpdate) {
4229        pFreeUpdate = pShadowUpdate;
4230        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4231        VkWriteDescriptorSet *pWDS = NULL;
4232        switch (pFreeUpdate->sType) {
4233        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4234            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4235            switch (pWDS->descriptorType) {
4236            case VK_DESCRIPTOR_TYPE_SAMPLER:
4237            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4238            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4239            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4240                delete[] pWDS->pImageInfo;
4241            } break;
4242            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4243            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4244                delete[] pWDS->pTexelBufferView;
4245            } break;
4246            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4247            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4248            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4249            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4250                delete[] pWDS->pBufferInfo;
4251            } break;
4252            default:
4253                break;
4254            }
4255            break;
4256        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4257            break;
4258        default:
4259            assert(0);
4260            break;
4261        }
4262        delete pFreeUpdate;
4263    }
4264}
4265
4266// Free all DS Pools including their Sets & related sub-structs
4267// NOTE : Calls to this function should be wrapped in mutex
4268static void deletePools(layer_data *my_data) {
4269    if (my_data->descriptorPoolMap.empty())
4270        return;
4271    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4272        SET_NODE *pSet = (*ii).second->pSets;
4273        SET_NODE *pFreeSet = pSet;
4274        while (pSet) {
4275            pFreeSet = pSet;
4276            pSet = pSet->pNext;
4277            // Freeing layouts handled in deleteLayouts() function
4278            // Free Update shadow struct tree
4279            freeShadowUpdateTree(pFreeSet);
4280            delete pFreeSet;
4281        }
4282        delete (*ii).second;
4283    }
4284    my_data->descriptorPoolMap.clear();
4285}
4286
4287// WARN : Once deleteLayouts() called, any layout ptrs in Pool/Set data structure will be invalid
4288// NOTE : Calls to this function should be wrapped in mutex
4289static void deleteLayouts(layer_data *my_data) {
4290    if (my_data->descriptorSetLayoutMap.empty())
4291        return;
4292    for (auto ii = my_data->descriptorSetLayoutMap.begin(); ii != my_data->descriptorSetLayoutMap.end(); ++ii) {
4293        LAYOUT_NODE *pLayout = (*ii).second;
4294        if (pLayout->createInfo.pBindings) {
4295            for (uint32_t i = 0; i < pLayout->createInfo.bindingCount; i++) {
4296                delete[] pLayout->createInfo.pBindings[i].pImmutableSamplers;
4297            }
4298            delete[] pLayout->createInfo.pBindings;
4299        }
4300        delete pLayout;
4301    }
4302    my_data->descriptorSetLayoutMap.clear();
4303}
4304
4305// Currently clearing a set is removing all previous updates to that set
4306//  TODO : Validate if this is correct clearing behavior
4307static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4308    SET_NODE *pSet = getSetNode(my_data, set);
4309    if (!pSet) {
4310        // TODO : Return error
4311    } else {
4312        freeShadowUpdateTree(pSet);
4313    }
4314}
4315
4316static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4317                                VkDescriptorPoolResetFlags flags) {
4318    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4319    if (!pPool) {
4320        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4321                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4322                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4323    } else {
4324        // TODO: validate flags
4325        // For every set allocated from this pool, clear it
4326        SET_NODE *pSet = pPool->pSets;
4327        while (pSet) {
4328            clearDescriptorSet(my_data, pSet->set);
4329            pSet = pSet->pNext;
4330        }
4331        // Reset available count for each type and available sets for this pool
4332        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4333            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4334        }
4335        pPool->availableSets = pPool->maxSets;
4336    }
4337}
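
// Per the spec, vkResetDescriptorPool() implicitly frees every set allocated from
// the pool, so the mirror operation here is: drop all shadowed updates, then
// restore the per-type descriptor counts and the set count to their creation-time
// maximums.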
4338
4339// For given CB object, fetch associated CB Node from map
4340static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
4341    if (my_data->commandBufferMap.count(cb) == 0) {
4342        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4343                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4344                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
4345        return NULL;
4346    }
4347    return my_data->commandBufferMap[cb];
4348}
4349
4350// Free all CB Nodes
4351// NOTE : Calls to this function should be wrapped in mutex
4352static void deleteCommandBuffers(layer_data *my_data) {
4353    if (my_data->commandBufferMap.empty()) {
4354        return;
4355    }
4356    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
4357        delete (*ii).second;
4358    }
4359    my_data->commandBufferMap.clear();
4360}
4361
4362static VkBool32 report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
4363    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4364                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
4365                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
4366}
4367
4368VkBool32 validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
4369    if (!pCB->activeRenderPass)
4370        return VK_FALSE;
4371    VkBool32 skip_call = VK_FALSE;
4372    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
4373        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4374                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4375                             "Commands cannot be called in a subpass using secondary command buffers.");
4376    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
4377        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4378                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4379                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
4380    }
4381    return skip_call;
4382}
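
// Illustrative sketch (not part of this layer) of the rule enforced above: once a
// subpass is begun with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, only
// vkCmdExecuteCommands() may be recorded in it:
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdDraw(cb, 3, 1, 0, 0);                    // error: inline cmd in secondary-contents subpass
//     vkCmdExecuteCommands(cb, 1, &secondary_cb);   // OK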
4383
4384static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4385    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
4386        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4387                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4388                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
4389    return false;
4390}
4391
4392static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4393    if (!(flags & VK_QUEUE_COMPUTE_BIT))
4394        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4395                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4396                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
4397    return false;
4398}
4399
4400static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
4401    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
4402        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4403                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4404                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
4405    return false;
4406}
4407
4408// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
4409//  in the recording state or if there's an issue with the Cmd ordering
4410static VkBool32 addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
4411    VkBool32 skipCall = VK_FALSE;
4412    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
4413    if (pool_data != my_data->commandPoolMap.end()) {
4414        VkQueueFlags flags = my_data->physDevProperties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
4415        switch (cmd) {
4416        case CMD_BINDPIPELINE:
4417        case CMD_BINDPIPELINEDELTA:
4418        case CMD_BINDDESCRIPTORSETS:
4419        case CMD_FILLBUFFER:
4420        case CMD_CLEARCOLORIMAGE:
4421        case CMD_SETEVENT:
4422        case CMD_RESETEVENT:
4423        case CMD_WAITEVENTS:
4424        case CMD_BEGINQUERY:
4425        case CMD_ENDQUERY:
4426        case CMD_RESETQUERYPOOL:
4427        case CMD_COPYQUERYPOOLRESULTS:
4428        case CMD_WRITETIMESTAMP:
4429            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4430            break;
4431        case CMD_SETVIEWPORTSTATE:
4432        case CMD_SETSCISSORSTATE:
4433        case CMD_SETLINEWIDTHSTATE:
4434        case CMD_SETDEPTHBIASSTATE:
4435        case CMD_SETBLENDSTATE:
4436        case CMD_SETDEPTHBOUNDSSTATE:
4437        case CMD_SETSTENCILREADMASKSTATE:
4438        case CMD_SETSTENCILWRITEMASKSTATE:
4439        case CMD_SETSTENCILREFERENCESTATE:
4440        case CMD_BINDINDEXBUFFER:
4441        case CMD_BINDVERTEXBUFFER:
4442        case CMD_DRAW:
4443        case CMD_DRAWINDEXED:
4444        case CMD_DRAWINDIRECT:
4445        case CMD_DRAWINDEXEDINDIRECT:
4446        case CMD_BLITIMAGE:
4447        case CMD_CLEARATTACHMENTS:
4448        case CMD_CLEARDEPTHSTENCILIMAGE:
4449        case CMD_RESOLVEIMAGE:
4450        case CMD_BEGINRENDERPASS:
4451        case CMD_NEXTSUBPASS:
4452        case CMD_ENDRENDERPASS:
4453            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4454            break;
4455        case CMD_DISPATCH:
4456        case CMD_DISPATCHINDIRECT:
4457            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4458            break;
4459        case CMD_COPYBUFFER:
4460        case CMD_COPYIMAGE:
4461        case CMD_COPYBUFFERTOIMAGE:
4462        case CMD_COPYIMAGETOBUFFER:
4463        case CMD_CLONEIMAGEDATA:
4464        case CMD_UPDATEBUFFER:
4465        case CMD_PIPELINEBARRIER:
4466        case CMD_EXECUTECOMMANDS:
4467            break;
4468        default:
4469            break;
4470        }
4471    }
4472    if (pCB->state != CB_RECORDING) {
4473        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
4474    } else {
4475        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
4476        CMD_NODE cmdNode = {}; // init cmd node and append it to the end of the command list
4477        cmdNode.cmdNumber = ++pCB->numCmds;
4478        cmdNode.type = cmd;
4479        pCB->cmds.push_back(cmdNode);
4480    }
4481    return skipCall;
4482}
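
// Illustrative sketch (not part of this layer): recording a compute command on a
// command buffer whose pool was created for a graphics-only queue family fails the
// capability check above:
//     // assume: cmd_buffer allocated from a pool on a VK_QUEUE_GRAPHICS_BIT-only family
//     vkCmdDispatch(cmd_buffer, 8, 8, 1); // CMD_DISPATCH -> checkComputeBit -> error
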
4483// Reset the command buffer state
4484//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4485static void resetCB(layer_data *my_data, const VkCommandBuffer cb) {
4486    GLOBAL_CB_NODE *pCB = my_data->commandBufferMap[cb];
4487    if (pCB) {
4488        pCB->cmds.clear();
4489        // Reset CB state (note that createInfo is not cleared)
4490        pCB->commandBuffer = cb;
4491        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4492        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4493        pCB->numCmds = 0;
4494        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4495        pCB->state = CB_NEW;
4496        pCB->submitCount = 0;
4497        pCB->status = 0;
4498        pCB->viewports.clear();
4499        pCB->scissors.clear();
4500        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4501            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
4502            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4503                auto set_node = my_data->setMap.find(set);
4504                if (set_node != my_data->setMap.end()) {
4505                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
4506                }
4507            }
4508            pCB->lastBound[i].reset();
4509        }
4510        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4511        pCB->activeRenderPass = 0;
4512        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4513        pCB->activeSubpass = 0;
4514        pCB->framebuffer = 0;
4515        pCB->fenceId = 0;
4516        pCB->lastSubmittedFence = VK_NULL_HANDLE;
4517        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
4518        pCB->destroyedSets.clear();
4519        pCB->updatedSets.clear();
4520        pCB->destroyedFramebuffers.clear();
4521        pCB->waitedEvents.clear();
4522        pCB->semaphores.clear();
4523        pCB->events.clear();
4524        pCB->waitedEventsBeforeQueryReset.clear();
4525        pCB->queryToStateMap.clear();
4526        pCB->activeQueries.clear();
4527        pCB->startedQueries.clear();
4528        pCB->imageLayoutMap.clear();
4529        pCB->eventToStageMap.clear();
4530        pCB->drawData.clear();
4531        pCB->currentDrawData.buffers.clear();
4532        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4533        pCB->secondaryCommandBuffers.clear();
4534        pCB->updateImages.clear();
4535        pCB->updateBuffers.clear();
4536        pCB->validate_functions.clear();
4537        pCB->pMemObjList.clear();
4538        pCB->eventUpdates.clear();
4539    }
4540}
4541
4542// Set PSO-related status bits for CB, including dynamic state set via PSO
4543static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4544    // Account for any dynamic state not set via this PSO
4545    if (!pPipe->dynStateCI.dynamicStateCount) { // All state is static
4546        pCB->status = CBSTATUS_ALL;
4547    } else {
4548        // First consider all state on
4549        // Then unset any state that's noted as dynamic in PSO
4550        // Finally OR that into CB statemask
4551        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4552        for (uint32_t i = 0; i < pPipe->dynStateCI.dynamicStateCount; i++) {
4553            switch (pPipe->dynStateCI.pDynamicStates[i]) {
4554            case VK_DYNAMIC_STATE_VIEWPORT:
4555                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
4556                break;
4557            case VK_DYNAMIC_STATE_SCISSOR:
4558                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
4559                break;
4560            case VK_DYNAMIC_STATE_LINE_WIDTH:
4561                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4562                break;
4563            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4564                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4565                break;
4566            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4567                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4568                break;
4569            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4570                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4571                break;
4572            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4573                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4574                break;
4575            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4576                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4577                break;
4578            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4579                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4580                break;
4581            default:
4582                // TODO : Flag error here
4583                break;
4584            }
4585        }
4586        pCB->status |= psoDynStateMask;
4587    }
4588}
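
// Illustrative sketch (not part of this layer): if a pipeline lists
// VK_DYNAMIC_STATE_VIEWPORT, the mask above leaves CBSTATUS_VIEWPORT_SET unset, so
// the application must supply that state itself before drawing:
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
//     vkCmdSetViewport(cb, 0, 1, &viewport); // sets CBSTATUS_VIEWPORT_SET dynamically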
4589
4590// Print the last bound Gfx Pipeline
4591static VkBool32 printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4592    VkBool32 skipCall = VK_FALSE;
4593    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4594    if (pCB) {
4595        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
4596        if (!pPipeTrav) {
4597            // nothing to print
4598        } else {
4599            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4600                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
4601                                vk_print_vkgraphicspipelinecreateinfo(&pPipeTrav->graphicsPipelineCI, "{DS}").c_str());
4602        }
4603    }
4604    return skipCall;
4605}
4606
4607static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4608    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4609    if (pCB && pCB->cmds.size() > 0) {
4610        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4611                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
4612        vector<CMD_NODE> cmds = pCB->cmds;
4613        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4614            // TODO : Need to pass cb as srcObj here
4615            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4616                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4617        }
4618    } else {
4619        // Nothing to print
4620    }
4621}
4622
4623static VkBool32 synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4624    VkBool32 skipCall = VK_FALSE;
4625    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4626        return skipCall;
4627    }
4628    skipCall |= printPipeline(my_data, cb);
4629    return skipCall;
4630}
4631
4632// Flags validation error if the associated call is made inside a render pass. The apiName
4633// routine should ONLY be called outside a render pass.
4634static VkBool32 insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4635    VkBool32 inside = VK_FALSE;
4636    if (pCB->activeRenderPass) {
4637        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4638                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4639                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
4640                         (uint64_t)pCB->activeRenderPass);
4641    }
4642    return inside;
4643}
4644
4645// Flags validation error if the associated call is made outside a render pass. The apiName
4646// routine should ONLY be called inside a render pass.
4647static VkBool32 outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4648    VkBool32 outside = VK_FALSE;
4649    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4650        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4651         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4652        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4653                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4654                          "%s: This call must be issued inside an active render pass.", apiName);
4655    }
4656    return outside;
4657}
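
// Illustrative sketch (not part of this layer): transfer-style commands are
// expected to be checked with insideRenderPass() (they must be recorded outside a
// render pass), while draws are checked with outsideRenderPass(), e.g.:
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdCopyBuffer(cb, src, dst, 1, &region); // error: inside an active render pass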
4658
4659static void init_core_validation(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
4660
4661    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_core_validation");
4662
4663    if (!globalLockInitialized) {
4664        loader_platform_thread_create_mutex(&globalLock);
4665        globalLockInitialized = 1;
4666    }
4667#if MTMERGESOURCE
4668    // Zero out memory property data
4669    memset(&memProps, 0, sizeof(VkPhysicalDeviceMemoryProperties));
4670#endif
4671}
4672
4673VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4674vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4675    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4676
4677    assert(chain_info->u.pLayerInfo);
4678    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4679    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4680    if (fpCreateInstance == NULL)
4681        return VK_ERROR_INITIALIZATION_FAILED;
4682
4683    // Advance the link info for the next element on the chain
4684    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4685
4686    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4687    if (result != VK_SUCCESS)
4688        return result;
4689
4690    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4691    my_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4692    layer_init_instance_dispatch_table(*pInstance, my_data->instance_dispatch_table, fpGetInstanceProcAddr);
4693
4694    my_data->report_data = debug_report_create_instance(my_data->instance_dispatch_table, *pInstance,
4695                                                        pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4696
4697    init_core_validation(my_data, pAllocator);
4698
4699    ValidateLayerOrdering(*pCreateInfo);
4700
4701    return result;
4702}
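
// Illustrative application-side sketch (not part of this layer): this entry point
// runs when the loader is asked to enable the layer at instance creation:
//     const char *layers[] = {"VK_LAYER_LUNARG_core_validation"};
//     VkInstanceCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//     ci.enabledLayerCount = 1;
//     ci.ppEnabledLayerNames = layers;
//     vkCreateInstance(&ci, NULL, &instance);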
4703
4704/* hook DestroyInstance to remove tableInstanceMap entry */
4705VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4706    // TODOSC : Shouldn't need any customization here
4707    dispatch_key key = get_dispatch_key(instance);
4708    // TBD: Need any locking this early, in case this function is called at the
4709    // same time by more than one thread?
4710    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4711    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4712    pTable->DestroyInstance(instance, pAllocator);
4713
4714    loader_platform_thread_lock_mutex(&globalLock);
4715    // Clean up logging callback, if any
4716    while (my_data->logging_callback.size() > 0) {
4717        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4718        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4719        my_data->logging_callback.pop_back();
4720    }
4721
4722    layer_debug_report_destroy_instance(my_data->report_data);
4723    delete my_data->instance_dispatch_table;
4724    layer_data_map.erase(key);
4725    loader_platform_thread_unlock_mutex(&globalLock);
4726    if (layer_data_map.empty()) {
4727        // Release mutex when destroying last instance.
4728        loader_platform_thread_delete_mutex(&globalLock);
4729        globalLockInitialized = 0;
4730    }
4731}
4732
4733static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4734    uint32_t i;
4735    // TBD: Need any locking, in case this function is called at the same time
4736    // by more than one thread?
4737    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4738    dev_data->device_extensions.wsi_enabled = false;
4739
4740    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4741    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
4742    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
4743    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
4744    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
4745    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
4746    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
4747
4748    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4749        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4750            dev_data->device_extensions.wsi_enabled = true;
4751    }
4752}
4753
4754VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4755                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4756    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4757
4758    assert(chain_info->u.pLayerInfo);
4759    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4760    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4761    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
4762    if (fpCreateDevice == NULL) {
4763        return VK_ERROR_INITIALIZATION_FAILED;
4764    }
4765
4766    // Advance the link info for the next element on the chain
4767    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4768
4769    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4770    if (result != VK_SUCCESS) {
4771        return result;
4772    }
4773
4774    loader_platform_thread_lock_mutex(&globalLock);
4775    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4776    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4777
4778    // Setup device dispatch table
4779    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4780    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4781    my_device_data->device = *pDevice;
4782
4783    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4784    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4785    // Get physical device limits for this device
4786    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->physDevProperties.properties));
4787    uint32_t count;
4788    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4789    my_device_data->physDevProperties.queue_family_properties.resize(count);
4790    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4791        gpu, &count, &my_device_data->physDevProperties.queue_family_properties[0]);
4792    // TODO: device limits should make sure these are compatible
4793    if (pCreateInfo->pEnabledFeatures) {
4794        my_device_data->physDevProperties.features = *pCreateInfo->pEnabledFeatures;
4795    } else {
4796        memset(&my_device_data->physDevProperties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4797    }
4798    loader_platform_thread_unlock_mutex(&globalLock);
4799
4800    ValidateLayerOrdering(*pCreateInfo);
4801
4802    return result;
4803}
4804
4805// prototype
4806static void deleteRenderPasses(layer_data *);
4807VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4808    // TODOSC : Shouldn't need any customization here
4809    dispatch_key key = get_dispatch_key(device);
4810    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4811    // Free all the memory
4812    loader_platform_thread_lock_mutex(&globalLock);
4813    deletePipelines(dev_data);
4814    deleteRenderPasses(dev_data);
4815    deleteCommandBuffers(dev_data);
4816    deletePools(dev_data);
4817    deleteLayouts(dev_data);
4818    dev_data->imageViewMap.clear();
4819    dev_data->imageMap.clear();
4820    dev_data->imageSubresourceMap.clear();
4821    dev_data->imageLayoutMap.clear();
4822    dev_data->bufferViewMap.clear();
4823    dev_data->bufferMap.clear();
4824    loader_platform_thread_unlock_mutex(&globalLock);
4825#if MTMERGESOURCE
4826    VkBool32 skipCall = VK_FALSE;
4827    loader_platform_thread_lock_mutex(&globalLock);
4828    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4829            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4830    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4831            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4832    print_mem_list(dev_data, device);
4833    printCBList(dev_data, device);
4834    delete_cmd_buf_info_list(dev_data);
4835    // Report any memory leaks
4836    DEVICE_MEM_INFO *pInfo = NULL;
4837    if (dev_data->memObjMap.size() > 0) {
4838        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4839            pInfo = &(*ii).second;
4840            if (pInfo->allocInfo.allocationSize != 0) {
4841                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4842                skipCall |=
4843                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4844                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4845                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
4846                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
4847                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4848            }
4849        }
4850    }
4851    // Queues persist until device is destroyed
4852    delete_queue_info_list(dev_data);
4853    layer_debug_report_destroy_device(device);
4854    loader_platform_thread_unlock_mutex(&globalLock);
4855
4856#if DISPATCH_MAP_DEBUG
4857    fprintf(stderr, "Device: %p, key: %p\n", device, key);
4858#endif
4859    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4860    if (VK_FALSE == skipCall) {
4861        pDisp->DestroyDevice(device, pAllocator);
4862    }
4863#else
4864    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4865#endif
4866    delete dev_data->device_dispatch_table;
4867    layer_data_map.erase(key);
4868}
4869
4870#if MTMERGESOURCE
4871VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
4872vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
4873    layer_data *my_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
4874    VkLayerInstanceDispatchTable *pInstanceTable = my_data->instance_dispatch_table;
4875    pInstanceTable->GetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties);
4876    memcpy(&memProps, pMemoryProperties, sizeof(VkPhysicalDeviceMemoryProperties));
4877}
4878#endif
4879
4880static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4881
4882VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4883vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
4884    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
4885}
4886
4887VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4888vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
4889    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
4890}
4891
4892// TODO: Why does this exist - can we just use global?
4893static const VkLayerProperties cv_device_layers[] = {{
4894    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
4895}};
4896
4897VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
4898                                                                                    const char *pLayerName, uint32_t *pCount,
4899                                                                                    VkExtensionProperties *pProperties) {
4900    if (pLayerName == NULL) {
4901        dispatch_key key = get_dispatch_key(physicalDevice);
4902        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4903        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
4904    } else {
4905        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
4906    }
4907}
4908
4909VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
4910vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
4911    /* draw_state physical device layers are the same as global */
4912    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
4913}
4914
4915// This validates that, for each IMAGE referenced by the command buffer, the
4916// initial layout recorded in the command buffer is the same
4917// as the current globally tracked layout for that IMAGE
4918VkBool32 ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
4919    VkBool32 skip_call = VK_FALSE;
4920    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
4921    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
4922    for (auto cb_image_data : pCB->imageLayoutMap) {
4923        VkImageLayout imageLayout;
4924        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4925            skip_call |=
4926                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4927                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
4928                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4929        } else {
4930            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4931                // TODO: Set memory invalid which is in mem_tracker currently
4932            } else if (imageLayout != cb_image_data.second.initialLayout) {
4933                skip_call |=
4934                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4935                            reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4936                            "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
4937                            "first use is %s.",
4938                            reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4939                            string_VkImageLayout(cb_image_data.second.initialLayout));
4940            }
4941            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4942        }
4943    }
4944    return skip_call;
4945}
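
// Illustrative scenario (not part of this layer): a command buffer recorded while
// an image was TRANSFER_DST_OPTIMAL becomes invalid to submit after another
// submission transitions the image, because the recorded initialLayout no longer
// matches the globally tracked layout checked above.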
4946
4947// Track which resources are in-flight by atomically incrementing their "in_use" count
4948VkBool32 validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4949    VkBool32 skip_call = VK_FALSE;
4950    for (auto drawDataElement : pCB->drawData) {
4951        for (auto buffer : drawDataElement.buffers) {
4952            auto buffer_data = my_data->bufferMap.find(buffer);
4953            if (buffer_data == my_data->bufferMap.end()) {
4954                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4955                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4956                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
4957            } else {
4958                buffer_data->second.in_use.fetch_add(1);
4959            }
4960        }
4961    }
4962    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4963        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4964            auto setNode = my_data->setMap.find(set);
4965            if (setNode == my_data->setMap.end()) {
4966                skip_call |=
4967                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4968                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4969                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
4970            } else {
4971                setNode->second->in_use.fetch_add(1);
4972            }
4973        }
4974    }
4975    for (auto semaphore : pCB->semaphores) {
4976        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4977        if (semaphoreNode == my_data->semaphoreMap.end()) {
4978            skip_call |=
4979                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4980                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4981                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
4982        } else {
4983            semaphoreNode->second.in_use.fetch_add(1);
4984        }
4985    }
4986    for (auto event : pCB->events) {
4987        auto eventNode = my_data->eventMap.find(event);
4988        if (eventNode == my_data->eventMap.end()) {
4989            skip_call |=
4990                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4991                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4992                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
4993        } else {
4994            eventNode->second.in_use.fetch_add(1);
4995        }
4996    }
4997    return skip_call;
4998}
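
// The matching decrements for these atomic in_use counts happen in the
// decrementResources() overloads below, once the fence or queue that covered this
// submission is observed to have completed.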
4999
5000void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5001    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5002    for (auto drawDataElement : pCB->drawData) {
5003        for (auto buffer : drawDataElement.buffers) {
5004            auto buffer_data = my_data->bufferMap.find(buffer);
5005            if (buffer_data != my_data->bufferMap.end()) {
5006                buffer_data->second.in_use.fetch_sub(1);
5007            }
5008        }
5009    }
5010    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
5011        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
5012            auto setNode = my_data->setMap.find(set);
5013            if (setNode != my_data->setMap.end()) {
5014                setNode->second->in_use.fetch_sub(1);
5015            }
5016        }
5017    }
5018    for (auto semaphore : pCB->semaphores) {
5019        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
5020        if (semaphoreNode != my_data->semaphoreMap.end()) {
5021            semaphoreNode->second.in_use.fetch_sub(1);
5022        }
5023    }
5024    for (auto event : pCB->events) {
5025        auto eventNode = my_data->eventMap.find(event);
5026        if (eventNode != my_data->eventMap.end()) {
5027            eventNode->second.in_use.fetch_sub(1);
5028        }
5029    }
5030    for (auto queryStatePair : pCB->queryToStateMap) {
5031        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
5032    }
5033    for (auto eventStagePair : pCB->eventToStageMap) {
5034        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
5035    }
5036}
5037
5038void decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
5039    for (uint32_t i = 0; i < fenceCount; ++i) {
5040        auto fence_data = my_data->fenceMap.find(pFences[i]);
5041        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
5042            return;
5043        fence_data->second.needsSignaled = false;
5044        fence_data->second.in_use.fetch_sub(1);
5045        decrementResources(my_data, fence_data->second.priorFences.size(), fence_data->second.priorFences.data());
5046        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
5047            decrementResources(my_data, cmdBuffer);
5048        }
5049    }
5050}
5051
5052void decrementResources(layer_data *my_data, VkQueue queue) {
5053    auto queue_data = my_data->queueMap.find(queue);
5054    if (queue_data != my_data->queueMap.end()) {
5055        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
5056            decrementResources(my_data, cmdBuffer);
5057        }
5058        queue_data->second.untrackedCmdBuffers.clear();
5059        decrementResources(my_data, queue_data->second.lastFences.size(), queue_data->second.lastFences.data());
5060    }
5061}
5062
5063void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
5064    if (queue == other_queue) {
5065        return;
5066    }
5067    auto queue_data = dev_data->queueMap.find(queue);
5068    auto other_queue_data = dev_data->queueMap.find(other_queue);
5069    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
5070        return;
5071    }
5072    for (auto prior_fence : other_queue_data->second.lastFences) {
5073        queue_data->second.lastFences.push_back(prior_fence);
5074    }
5075    if (fence != VK_NULL_HANDLE) {
5076        auto fence_data = dev_data->fenceMap.find(fence);
5077        if (fence_data == dev_data->fenceMap.end()) {
5078            return;
5079        }
5080        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5081            fence_data->second.cmdBuffers.push_back(cmdbuffer);
5082        }
5083        other_queue_data->second.untrackedCmdBuffers.clear();
5084    } else {
5085        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
5086            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
5087        }
5088        other_queue_data->second.untrackedCmdBuffers.clear();
5089    }
5090    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
5091        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
5092    }
5093}
5094
5095void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5096    auto queue_data = my_data->queueMap.find(queue);
5097    if (fence != VK_NULL_HANDLE) {
5098        vector<VkFence> prior_fences;
5099        auto fence_data = my_data->fenceMap.find(fence);
5100        if (fence_data == my_data->fenceMap.end()) {
5101            return;
5102        }
5103        if (queue_data != my_data->queueMap.end()) {
5104            prior_fences = queue_data->second.lastFences;
5105            queue_data->second.lastFences.clear();
5106            queue_data->second.lastFences.push_back(fence);
5107            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
5108                fence_data->second.cmdBuffers.push_back(cmdbuffer);
5109            }
5110            queue_data->second.untrackedCmdBuffers.clear();
5111        }
5112        fence_data->second.cmdBuffers.clear();
5113        fence_data->second.priorFences = prior_fences;
5114        fence_data->second.needsSignaled = true;
5115        fence_data->second.queue = queue;
5116        fence_data->second.in_use.fetch_add(1);
5117        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5118            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5119            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5120                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5121                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
5122                }
5123                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
5124            }
5125        }
5126    } else {
5127        if (queue_data != my_data->queueMap.end()) {
5128            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5129                const VkSubmitInfo *submit = &pSubmits[submit_idx];
5130                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5131                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5132                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
5133                    }
5134                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
5135                }
5136            }
5137        }
5138    }
5139    if (queue_data != my_data->queueMap.end()) {
5140        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5141            const VkSubmitInfo *submit = &pSubmits[submit_idx];
5142            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
5143                // Add cmdBuffers to both the global set and queue set
5144                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
5145                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
5146                    queue_data->second.inFlightCmdBuffers.insert(secondaryCmdBuffer);
5147                }
5148                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5149                queue_data->second.inFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
5150            }
5151        }
5152    }
5153}
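
// Submission bookkeeping above: when a fence is provided, the submitted primaries
// (plus their secondaries) are attached to that fence and any previously untracked
// buffers on the queue are swept onto it; with no fence they stay on the queue's
// untrackedCmdBuffers list until a later fenced submit or a queue/device wait
// retires them.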
5154
5155bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5156    bool skip_call = false;
5157    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
5158        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5159        skip_call |=
5160            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5161                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
5162                    "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.",
5163                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
5164    }
5165    return skip_call;
5166}
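
// Illustrative sketch (not part of this layer): submitting the same command buffer
// twice without waiting requires the simultaneous-use flag at begin time:
//     VkCommandBufferBeginInfo begin = {};
//     begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; // else the second in-flight submit errors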
5167
5168static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5169    bool skipCall = false;
5170    // Validate that cmd buffers have been updated
5171    if (CB_RECORDED != pCB->state) {
5172        if (CB_INVALID == pCB->state) {
5173            // Inform app of reason CB invalid
5174            bool causeReported = false;
5175            if (!pCB->destroyedSets.empty()) {
5176                std::stringstream set_string;
5177                for (auto set : pCB->destroyedSets)
5178                    set_string << " " << set;
5179
5180                skipCall |=
5181                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5182                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5183                            "You are submitting command buffer %#" PRIxLEAST64
5184                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
5185                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5186                causeReported = true;
5187            }
5188            if (!pCB->updatedSets.empty()) {
5189                std::stringstream set_string;
5190                for (auto set : pCB->updatedSets)
5191                    set_string << " " << set;
5192
5193                skipCall |=
5194                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5195                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5196                            "You are submitting command buffer %#" PRIxLEAST64
5197                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
5198                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
5199                causeReported = true;
5200            }
5201            if (!pCB->destroyedFramebuffers.empty()) {
5202                std::stringstream fb_string;
5203                for (auto fb : pCB->destroyedFramebuffers)
5204                    fb_string << " " << fb;
5205
5206                skipCall |=
5207                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5208                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5209                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
5210                            "referenced framebuffers destroyed: %s",
5211                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
5212                causeReported = true;
5213            }
5214            // TODO : This is defensive programming to make sure an error is
5215            //  flagged if we hit this INVALID cmd buffer case and none of the
5216            //  above cases are hit. As the number of INVALID cases grows, this
5217            //  code should be updated to seamlessly handle all the cases.
5218            if (!causeReported) {
5219                skipCall |= log_msg(
5220                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5221                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
5222                    "You are submitting command buffer %#" PRIxLEAST64
5223                    " that is invalid due to an unknown cause. Validation should"
5224                    " be improved to report the exact cause.",
5225                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
5226            }
5227        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
5228            skipCall |=
5229                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5230                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
5231                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
5232                        (uint64_t)(pCB->commandBuffer));
5233        }
5234    }
5235    return skipCall;
5236}
5237
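// Illustrative sketch (not part of this layer): the DRAWSTATE_NO_END_COMMAND_BUFFER
// case above is typically hit by submitting a command buffer that was begun but
// never ended. App-side handles below are hypothetical.
//
//     vkBeginCommandBuffer(cb, &beginInfo);
//     vkCmdDraw(cb, 3, 1, 0, 0);
//     // missing: vkEndCommandBuffer(cb);
//     VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     si.commandBufferCount = 1;
//     si.pCommandBuffers = &cb;
//     vkQueueSubmit(queue, 1, &si, VK_NULL_HANDLE); // flagged by the check above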
5238static VkBool32 validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5239    // Track in-use for resources off of primary and any secondary CBs
5240    VkBool32 skipCall = validateAndIncrementResources(dev_data, pCB);
5241    if (!pCB->secondaryCommandBuffers.empty()) {
5242        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5243            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
5244            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5245            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
5246                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5247                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5248                        "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
5249                        " but that buffer has subsequently been bound to "
5250                        "primary cmd buffer %#" PRIxLEAST64 ".",
5251                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5252                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5253            }
5254        }
5255    }
5256    // TODO : Verify whether this also needs to be checked for secondary command
5257    //  buffers. If so, this block of code can move to the
5258    //  validateCommandBufferState() function. Vulkan issue GL106 was filed to clarify.
5259    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
5260        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5261                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5262                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
5263                            "set, but has been submitted %" PRIu64 " times.",
5264                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
5265    }
5266    skipCall |= validateCommandBufferState(dev_data, pCB);
5267    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5268    // on device
5269    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5270    return skipCall;
5271}
5272
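// Illustrative sketch: the one-time-submit check above fires when a command buffer
// recorded with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT is submitted again
// without being re-recorded (hypothetical app-side handles).
//
//     VkCommandBufferBeginInfo bi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     bi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
//     vkBeginCommandBuffer(cb, &bi);
//     /* record work */
//     vkEndCommandBuffer(cb);
//     vkQueueSubmit(queue, 1, &si, VK_NULL_HANDLE); // ok, submitCount == 1
//     vkQueueSubmit(queue, 1, &si, VK_NULL_HANDLE); // flagged, submitCount == 2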
5273VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5274vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5275    VkBool32 skipCall = VK_FALSE;
5276    GLOBAL_CB_NODE *pCBNode = NULL;
5277    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5278    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5279    loader_platform_thread_lock_mutex(&globalLock);
5280#if MTMERGESOURCE
5281    // TODO : Need to track fence and clear mem references when fence clears
5282    // MTMTODO : Merge this code with code below to avoid duplicating efforts
5283    uint64_t fenceId = 0;
5284    skipCall = add_fence_info(dev_data, fence, queue, &fenceId);
5285
5286    print_mem_list(dev_data, queue);
5287    printCBList(dev_data, queue);
5288    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5289        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5290        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5291            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5292            if (pCBNode) {
5293                pCBNode->fenceId = fenceId;
5294                pCBNode->lastSubmittedFence = fence;
5295                pCBNode->lastSubmittedQueue = queue;
5296                for (auto &function : pCBNode->validate_functions) {
5297                    skipCall |= function();
5298                }
5299                for (auto &function : pCBNode->eventUpdates) {
5300                    skipCall |= static_cast<VkBool32>(function(queue));
5301                }
5302            }
5303        }
5304
5305        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5306            VkSemaphore sem = submit->pWaitSemaphores[i];
5307
5308            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5309                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
5310                    skipCall |=
5311                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5312                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
5313                                "vkQueueSubmit: Semaphore must be in signaled state before passing to pWaitSemaphores");
5314                }
5315                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
5316            }
5317        }
5318        for (uint32_t i = 0; i < submit->signalSemaphoreCount; i++) {
5319            VkSemaphore sem = submit->pSignalSemaphores[i];
5320
5321            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5322                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
5323                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5324                                       VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT, (uint64_t)sem, __LINE__, MEMTRACK_NONE,
5325                                       "SEMAPHORE", "vkQueueSubmit: Semaphore must not be currently signaled or in a wait state");
5326                }
5327                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
5328            }
5329        }
5330    }
5331#endif
5332    // First verify that fence is not in use
5333    if ((fence != VK_NULL_HANDLE) && (submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5334        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5335                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5336                            "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5337    }
5338    // Now verify each individual submit
5339    std::unordered_set<VkQueue> processed_other_queues;
5340    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5341        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5342        vector<VkSemaphore> semaphoreList;
5343        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5344            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5345            semaphoreList.push_back(semaphore);
5346            if (dev_data->semaphoreMap[semaphore].signaled) {
5347                dev_data->semaphoreMap[semaphore].signaled = 0;
5348            } else {
5349                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5350                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5351                                    "DS", "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5352                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5353            }
5354            const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5355            if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5356                updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5357                processed_other_queues.insert(other_queue);
5358            }
5359        }
5360        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5361            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5362            semaphoreList.push_back(semaphore);
5363            if (dev_data->semaphoreMap[semaphore].signaled) {
5364                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5365                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
5366                                    "DS", "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5367                                          " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5368                                    reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5369                                    reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5370            } else {
5371                dev_data->semaphoreMap[semaphore].signaled = 1;
5372                dev_data->semaphoreMap[semaphore].queue = queue;
5373            }
5374        }
5375        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5376            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5377            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5378            pCBNode->semaphores = semaphoreList;
5379            pCBNode->submitCount++; // increment submit count
5380            skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5381        }
5382    }
5383    // Update cmdBuffer-related data structs and mark fence in-use
5384    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5385    loader_platform_thread_unlock_mutex(&globalLock);
5386    if (VK_FALSE == skipCall)
5387        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5388#if MTMERGESOURCE
5389    loader_platform_thread_lock_mutex(&globalLock);
5390    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5391        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5392        for (uint32_t i = 0; i < submit->waitSemaphoreCount; i++) {
5393            VkSemaphore sem = submit->pWaitSemaphores[i];
5394
5395            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
5396                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
5397            }
5398        }
5399    }
5400    loader_platform_thread_unlock_mutex(&globalLock);
5401#endif
5402    return result;
5403}
5404
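// Illustrative sketch: the forward-progress checks in vkQueueSubmit above expect
// every wait semaphore to have a pending signal. A correct pairing looks like this
// (hypothetical handles); waiting on `sem` without the first submit's signal would
// trigger DRAWSTATE_QUEUE_FORWARD_PROGRESS.
//
//     VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
//     VkSubmitInfo a = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     a.signalSemaphoreCount = 1;  a.pSignalSemaphores = &sem;
//     VkSubmitInfo b = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
//     b.waitSemaphoreCount = 1;    b.pWaitSemaphores = &sem;
//     b.pWaitDstStageMask = &waitStage;
//     vkQueueSubmit(queue, 1, &a, VK_NULL_HANDLE); // signals sem
//     vkQueueSubmit(queue, 1, &b, fence);          // waits on sem: ok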
5405#if MTMERGESOURCE
5406VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5407                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5408    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5409    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5410    // TODO : Track allocations and overall size here
5411    loader_platform_thread_lock_mutex(&globalLock);
5412    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5413    print_mem_list(my_data, device);
5414    loader_platform_thread_unlock_mutex(&globalLock);
5415    return result;
5416}
5417
5418VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5419vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5420    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5421
5422    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5423    // Before freeing a memory object, an application must ensure the memory object is no longer
5424    // in use by the device—for example by command buffers queued for execution. The memory need
5425    // not yet be unbound from all images and buffers, but any further use of those images or
5426    // buffers (on host or device) for anything other than destroying those objects will result in
5427    // undefined behavior.
5428
5429    loader_platform_thread_lock_mutex(&globalLock);
5430    freeMemObjInfo(my_data, device, mem, VK_FALSE);
5431    print_mem_list(my_data, device);
5432    printCBList(my_data, device);
5433    loader_platform_thread_unlock_mutex(&globalLock);
5434    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5435}
5436
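// Illustrative sketch: per the spec excerpt above, freeing memory that queued work
// still references is undefined. A conservative app-side pattern (hypothetical
// handles) fences the last submission that uses the allocation:
//
//     vkQueueSubmit(queue, 1, &si, fence);
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);
//     vkFreeMemory(device, mem, nullptr); // safe: device work using `mem` has retired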
5437VkBool32 validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5438    VkBool32 skipCall = VK_FALSE;
5439
5440    if (size == 0) {
5441        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5442        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5443                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5444                           "VkMapMemory: Attempting to map memory range of size zero");
5445    }
5446
5447    auto mem_element = my_data->memObjMap.find(mem);
5448    if (mem_element != my_data->memObjMap.end()) {
5449        // It is an application error to call VkMapMemory on an object that is already mapped
5450        if (mem_element->second.memRange.size != 0) {
5451            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5452                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5453                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5454        }
5455
5456        // Validate that offset + size is within object's allocationSize
5457        if (size == VK_WHOLE_SIZE) {
5458            if (offset >= mem_element->second.allocInfo.allocationSize) {
5459                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5460                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5461                                   "MEM", "Mapping memory from %" PRIu64 " to %" PRIu64 " with total allocation size %" PRIu64, offset,
5462                                   mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
5463            }
5464        } else {
5465            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5466                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5467                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5468                                   "MEM", "Mapping memory from %" PRIu64 " to %" PRIu64 " with total allocation size %" PRIu64, offset,
5469                                   size + offset, mem_element->second.allocInfo.allocationSize);
5470            }
5471        }
5472    }
5473    return skipCall;
5474}
5475
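// Illustrative sketch: calls that trip the range checks above, each considered in
// isolation (hypothetical handles; assume a 4096-byte allocation):
//
//     void *ptr;
//     vkMapMemory(device, mem, 0, 0, 0, &ptr);                 // size 0: warning
//     vkMapMemory(device, mem, 4096, VK_WHOLE_SIZE, 0, &ptr);  // offset >= allocationSize: error
//     vkMapMemory(device, mem, 1024, 4096, 0, &ptr);           // offset + size > allocationSize: error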
5476void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5477    auto mem_element = my_data->memObjMap.find(mem);
5478    if (mem_element != my_data->memObjMap.end()) {
5479        MemRange new_range;
5480        new_range.offset = offset;
5481        new_range.size = size;
5482        mem_element->second.memRange = new_range;
5483    }
5484}
5485
5486VkBool32 deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5487    VkBool32 skipCall = VK_FALSE;
5488    auto mem_element = my_data->memObjMap.find(mem);
5489    if (mem_element != my_data->memObjMap.end()) {
5490        if (!mem_element->second.memRange.size) {
5491            // Valid Usage: memory must currently be mapped
5492            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5493                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5494                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5495        }
5496        mem_element->second.memRange.size = 0;
5497        if (mem_element->second.pData) {
5498            free(mem_element->second.pData);
5499            mem_element->second.pData = 0;
5500        }
5501    }
5502    return skipCall;
5503}
5504
5505static char NoncoherentMemoryFillValue = 0xb; // Known pattern written around noncoherent mappings to help spot stray writes
5506
5507void initializeAndTrackMemory(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5508    auto mem_element = my_data->memObjMap.find(mem);
5509    if (mem_element != my_data->memObjMap.end()) {
5510        mem_element->second.pDriverData = *ppData;
5511        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5512        if (memProps.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5513            mem_element->second.pData = 0;
5514        } else {
5515            if (size == VK_WHOLE_SIZE) {
5516                size = mem_element->second.allocInfo.allocationSize;
5517            }
5518            size_t convSize = (size_t)(size);
            // Shadow noncoherent mappings with a double-size allocation filled with a
            // known pattern; the pointer handed back to the app is offset convSize/2 in,
            // leaving guard bands on both sides where stray writes can be detected.
5519            mem_element->second.pData = malloc(2 * convSize);
5520            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5521            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5522        }
5523    }
5524}
5525#endif
5526// Note: This function assumes that the global lock is held by the calling
5527// thread.
5528VkBool32 cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
5529    VkBool32 skip_call = VK_FALSE;
5530    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
5531    if (pCB) {
5532        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
5533            for (auto event : queryEventsPair.second) {
5534                if (my_data->eventMap[event].needsSignaled) {
5535                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5536                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5537                                         "Cannot get query results on queryPool %" PRIu64
5538                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
5539                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
5540                }
5541            }
5542        }
5543    }
5544    return skip_call;
5545}
5546// Remove given cmd_buffer from the global inFlight set.
5547//  Also, if given queue is valid, then remove the cmd_buffer from that queue's
5548//  inFlightCmdBuffers set. Finally, check all other queues and if given cmd_buffer
5549//  is still in flight on another queue, add it back into the global set.
5550// Note: This function assumes that the global lock is held by the calling
5551// thread.
5552static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkQueue queue) {
5553    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
5554    dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5555    if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5556        dev_data->queueMap[queue].inFlightCmdBuffers.erase(cmd_buffer);
5557        for (auto q : dev_data->queues) {
5558            if ((q != queue) &&
5559                (dev_data->queueMap[q].inFlightCmdBuffers.find(cmd_buffer) != dev_data->queueMap[q].inFlightCmdBuffers.end())) {
5560                dev_data->globalInFlightCmdBuffers.insert(cmd_buffer);
5561                break;
5562            }
5563        }
5564    }
5565}
5566#if MTMERGESOURCE
5567static inline bool verifyFenceStatus(VkDevice device, VkFence fence, const char *apiCall) {
5568    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5569    VkBool32 skipCall = false;
5570    auto pFenceInfo = my_data->fenceMap.find(fence);
5571    if (pFenceInfo != my_data->fenceMap.end()) {
5572        if (pFenceInfo->second.firstTimeFlag != VK_TRUE) {
5573            if (pFenceInfo->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT) {
5575                skipCall |=
5576                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5577                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5578                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5579            }
5580            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5581                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5582                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5583                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5584                                    "acquire next image.",
5585                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5586            }
5587        } else {
5588            pFenceInfo->second.firstTimeFlag = VK_FALSE;
5589        }
5590    }
5591    return skipCall;
5592}
5593#endif
5594VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5595vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5596    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5597    VkBool32 skip_call = VK_FALSE;
5598#if MTMERGESOURCE
5599    // Verify fence status of submitted fences
5600    loader_platform_thread_lock_mutex(&globalLock);
5601    for (uint32_t i = 0; i < fenceCount; i++) {
5602        skip_call |= verifyFenceStatus(device, pFences[i], "vkWaitForFences");
5603    }
5604    loader_platform_thread_unlock_mutex(&globalLock);
5605    if (skip_call)
5606        return VK_ERROR_VALIDATION_FAILED_EXT;
5607#endif
5608    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5609
5610    if (result == VK_SUCCESS) {
5611        loader_platform_thread_lock_mutex(&globalLock);
5612        // When we know that all fences are complete we can clean/remove their CBs
5613        if (waitAll || fenceCount == 1) {
5614            for (uint32_t i = 0; i < fenceCount; ++i) {
5615#if MTMERGESOURCE
5616                update_fence_tracking(dev_data, pFences[i]);
5617#endif
5618                VkQueue fence_queue = dev_data->fenceMap[pFences[i]].queue;
5619                for (auto cmdBuffer : dev_data->fenceMap[pFences[i]].cmdBuffers) {
5620                    skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5621                    removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5622                }
5623            }
5624            decrementResources(dev_data, fenceCount, pFences);
5625        }
5626        // NOTE : Alternate case not handled here is when some fences have completed. In
5627        //  this case for app to guarantee which fences completed it will have to call
5628        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5629        loader_platform_thread_unlock_mutex(&globalLock);
5630    }
5631    if (VK_FALSE != skip_call)
5632        return VK_ERROR_VALIDATION_FAILED_EXT;
5633    return result;
5634}
5635
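// Illustrative sketch: as the note above says, command-buffer state is retired here
// only when completion of every fence is known, i.e. waitAll == VK_TRUE or a single
// fence (hypothetical handles):
//
//     vkWaitForFences(device, 2, fences, VK_TRUE, UINT64_MAX);  // CBs retired here
//     vkWaitForFences(device, 2, fences, VK_FALSE, UINT64_MAX); // retired later, e.g.
//                                                               // via vkGetFenceStatus()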
5636VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5637    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5638    bool skipCall = false;
5639    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5640#if MTMERGESOURCE
5641    loader_platform_thread_lock_mutex(&globalLock);
5642    skipCall = verifyFenceStatus(device, fence, "vkGetFenceStatus");
5643    loader_platform_thread_unlock_mutex(&globalLock);
5644    if (skipCall)
5645        return result;
5646#endif
5647    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5648    VkBool32 skip_call = VK_FALSE;
5649    loader_platform_thread_lock_mutex(&globalLock);
5650    if (result == VK_SUCCESS) {
5651#if MTMERGESOURCE
5652        update_fence_tracking(dev_data, fence);
5653#endif
5654        auto fence_queue = dev_data->fenceMap[fence].queue;
5655        for (auto cmdBuffer : dev_data->fenceMap[fence].cmdBuffers) {
5656            skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5657            removeInFlightCmdBuffer(dev_data, cmdBuffer, fence_queue);
5658        }
5659        decrementResources(dev_data, 1, &fence);
5660    }
5661    loader_platform_thread_unlock_mutex(&globalLock);
5662    if (VK_FALSE != skip_call)
5663        return VK_ERROR_VALIDATION_FAILED_EXT;
5664    return result;
5665}
5666
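// Illustrative sketch: the polling pattern served by vkGetFenceStatus() above; only
// a VK_SUCCESS result takes the cleanup path (hypothetical handles):
//
//     while (vkGetFenceStatus(device, fence) == VK_NOT_READY) {
//         /* do other work */
//     }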
5667VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5668                                                            VkQueue *pQueue) {
5669    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5670    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5671    loader_platform_thread_lock_mutex(&globalLock);
5672
5673    // Add queue to tracking set only if it is new
5674    auto result = dev_data->queues.emplace(*pQueue);
5675    if (result.second) {
5676        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5677        pQNode->device = device;
5678#if MTMERGESOURCE
5679        pQNode->lastRetiredId = 0;
5680        pQNode->lastSubmittedId = 0;
5681#endif
5682    }
5683
5684    loader_platform_thread_unlock_mutex(&globalLock);
5685}
5686
5687VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
5688    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5689    decrementResources(dev_data, queue);
5690    VkBool32 skip_call = VK_FALSE;
5691    loader_platform_thread_lock_mutex(&globalLock);
5692    // Iterate over local set since we erase set members as we go in for loop
5693    auto local_cb_set = dev_data->queueMap[queue].inFlightCmdBuffers;
5694    for (auto cmdBuffer : local_cb_set) {
5695        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5696        removeInFlightCmdBuffer(dev_data, cmdBuffer, queue);
5697    }
5698    dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5699    loader_platform_thread_unlock_mutex(&globalLock);
5700    if (VK_FALSE != skip_call)
5701        return VK_ERROR_VALIDATION_FAILED_EXT;
5702    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
5703#if MTMERGESOURCE
5704    if (VK_SUCCESS == result) {
5705        loader_platform_thread_lock_mutex(&globalLock);
5706        retire_queue_fences(dev_data, queue);
5707        loader_platform_thread_unlock_mutex(&globalLock);
5708    }
5709#endif
5710    return result;
5711}
5712
5713VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5714    VkBool32 skip_call = VK_FALSE;
5715    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5716    loader_platform_thread_lock_mutex(&globalLock);
5717    for (auto queue : dev_data->queues) {
5718        decrementResources(dev_data, queue);
5719        if (dev_data->queueMap.find(queue) != dev_data->queueMap.end()) {
5720            // Clear all of the queue inFlightCmdBuffers (global set cleared below)
5721            dev_data->queueMap[queue].inFlightCmdBuffers.clear();
5722        }
5723    }
5724    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5725        skip_call |= cleanInFlightCmdBuffer(dev_data, cmdBuffer);
5726    }
5727    dev_data->globalInFlightCmdBuffers.clear();
5728    loader_platform_thread_unlock_mutex(&globalLock);
5729    if (VK_FALSE != skip_call)
5730        return VK_ERROR_VALIDATION_FAILED_EXT;
5731    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5732#if MTMERGESOURCE
5733    if (VK_SUCCESS == result) {
5734        loader_platform_thread_lock_mutex(&globalLock);
5735        retire_device_fences(dev_data, device);
5736        loader_platform_thread_unlock_mutex(&globalLock);
5737    }
5738#endif
5739    return result;
5740}
5741
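// Illustrative sketch: draining the device before teardown keeps the in-use checks
// in the vkDestroy*() entry points below from firing (hypothetical handles):
//
//     vkDeviceWaitIdle(device);               // retires all in-flight CBs and fences
//     vkDestroyFence(device, fence, nullptr);
//     vkDestroyBuffer(device, buffer, nullptr);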
5742VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5743    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5744    bool skipCall = false;
5745    loader_platform_thread_lock_mutex(&globalLock);
5746    if (dev_data->fenceMap[fence].in_use.load()) {
5747        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5748                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5749                            "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5750    }
5751#if MTMERGESOURCE
5752    delete_fence_info(dev_data, fence);
5753    auto item = dev_data->fenceMap.find(fence);
5754    if (item != dev_data->fenceMap.end()) {
5755        dev_data->fenceMap.erase(item);
5756    }
5757#endif
5758    loader_platform_thread_unlock_mutex(&globalLock);
5759    if (!skipCall)
5760        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5761}
5762
5763VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5764vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5765    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5766    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5767    loader_platform_thread_lock_mutex(&globalLock);
5768    auto item = dev_data->semaphoreMap.find(semaphore);
5769    if (item != dev_data->semaphoreMap.end()) {
5770        if (item->second.in_use.load()) {
5771            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5772                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5773                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5774        }
5775        dev_data->semaphoreMap.erase(semaphore);
5776    }
5777    loader_platform_thread_unlock_mutex(&globalLock);
5778    // TODO : Clean up any internal data structures using this obj.
5779}
5780
5781VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5782    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5783    bool skip_call = false;
5784    loader_platform_thread_lock_mutex(&globalLock);
5785    auto event_data = dev_data->eventMap.find(event);
5786    if (event_data != dev_data->eventMap.end()) {
5787        if (event_data->second.in_use.load()) {
5788            skip_call |= log_msg(
5789                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
5790                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
5791                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5792        }
5793        dev_data->eventMap.erase(event_data);
5794    }
5795    loader_platform_thread_unlock_mutex(&globalLock);
5796    if (!skip_call)
5797        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5798    // TODO : Clean up any internal data structures using this obj.
5799}
5800
5801VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5802vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5803    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5804        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5805    // TODO : Clean up any internal data structures using this obj.
5806}
5807
5808VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5809                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5810                                                     VkQueryResultFlags flags) {
5811    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5812    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5813    GLOBAL_CB_NODE *pCB = nullptr;
5814    loader_platform_thread_lock_mutex(&globalLock);
5815    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5816        pCB = getCBNode(dev_data, cmdBuffer);
5817        for (auto queryStatePair : pCB->queryToStateMap) {
5818            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5819        }
5820    }
5821    VkBool32 skip_call = VK_FALSE;
5822    for (uint32_t i = 0; i < queryCount; ++i) {
5823        QueryObject query = {queryPool, firstQuery + i};
5824        auto queryElement = queriesInFlight.find(query);
5825        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5828        // Available and in flight
5829        if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5830            queryToStateElement->second) {
5831            for (auto cmdBuffer : queryElement->second) {
5832                pCB = getCBNode(dev_data, cmdBuffer);
5833                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5834                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5835                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5836                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5837                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5838                                         (uint64_t)(queryPool), firstQuery + i);
5839                } else {
5840                    for (auto event : queryEventElement->second) {
5841                        dev_data->eventMap[event].needsSignaled = true;
5842                    }
5843                }
5844            }
5845            // Unavailable and in flight
5846        } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5847                   !queryToStateElement->second) {
5848            // TODO : Can there be the same query in use by multiple command buffers in flight?
5849            bool make_available = false;
5850            for (auto cmdBuffer : queryElement->second) {
5851                pCB = getCBNode(dev_data, cmdBuffer);
5852                make_available |= pCB->queryToStateMap[query];
5853            }
5854            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5855                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5856                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5857                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5858                                     (uint64_t)(queryPool), firstQuery + i);
5859            }
5860            // Unavailable
5861        } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5862            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5863                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5864                                 "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5865                                 (uint64_t)(queryPool), firstQuery + i);
5866            // Uninitialized
5867        } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5868            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT,
5869                                 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5870                                 "Cannot get query results on queryPool %" PRIu64 " with index %d as data has not been "
                                 "collected for this index.",
5871                                 (uint64_t)(queryPool), firstQuery + i);
5872        }
5873    }
5874    loader_platform_thread_unlock_mutex(&globalLock);
5875    if (skip_call)
5876        return VK_ERROR_VALIDATION_FAILED_EXT;
5877    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5878                                                                flags);
5879}
5880
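// Illustrative sketch: passing VK_QUERY_RESULT_WAIT_BIT (or _PARTIAL_BIT) is the
// usual way to satisfy the availability checks above when the query's command
// buffer is still in flight (hypothetical handles):
//
//     uint64_t results[4];
//     vkGetQueryPoolResults(device, pool, 0, 4, sizeof(results), results,
//                           sizeof(uint64_t),
//                           VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);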
5881VkBool32 validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5882    VkBool32 skip_call = VK_FALSE;
5883    auto buffer_data = my_data->bufferMap.find(buffer);
5884    if (buffer_data == my_data->bufferMap.end()) {
5885        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5886                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5887                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5888    } else {
5889        if (buffer_data->second.in_use.load()) {
5890            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5891                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5892                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5893        }
5894    }
5895    return skip_call;
5896}
5897
5898VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5899vkDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
5900    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5901    VkBool32 skipCall = VK_FALSE;
5902    loader_platform_thread_lock_mutex(&globalLock);
5903#if MTMERGESOURCE
5904    auto item = dev_data->bufferBindingMap.find((uint64_t)buffer);
5905    if (item != dev_data->bufferBindingMap.end()) {
5906        skipCall = clear_object_binding(dev_data, device, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5907        dev_data->bufferBindingMap.erase(item);
5908    }
5909#endif
5910    if (!validateIdleBuffer(dev_data, buffer) && (VK_FALSE == skipCall)) {
5911        loader_platform_thread_unlock_mutex(&globalLock);
5912        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5913        loader_platform_thread_lock_mutex(&globalLock);
5914    }
5915    dev_data->bufferMap.erase(buffer);
5916    loader_platform_thread_unlock_mutex(&globalLock);
5917}
5918
5919VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5920vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5921    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5922    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5923    loader_platform_thread_lock_mutex(&globalLock);
5924    auto item = dev_data->bufferViewMap.find(bufferView);
5925    if (item != dev_data->bufferViewMap.end()) {
5926        dev_data->bufferViewMap.erase(item);
5927    }
5928    loader_platform_thread_unlock_mutex(&globalLock);
5929}
5930
5931VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5932    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5933    VkBool32 skipCall = VK_FALSE;
5934#if MTMERGESOURCE
5935    loader_platform_thread_lock_mutex(&globalLock);
5936    auto item = dev_data->imageBindingMap.find((uint64_t)image);
5937    if (item != dev_data->imageBindingMap.end()) {
5938        skipCall = clear_object_binding(dev_data, device, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5939        dev_data->imageBindingMap.erase(item);
5940    }
5941    loader_platform_thread_unlock_mutex(&globalLock);
5942#endif
5943    if (VK_FALSE == skipCall)
5944        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5945
5946    loader_platform_thread_lock_mutex(&globalLock);
5947    const auto& entry = dev_data->imageMap.find(image);
5948    if (entry != dev_data->imageMap.end()) {
5949        // Clear any memory mapping for this image
5950        const auto &mem_entry = dev_data->memObjMap.find(entry->second.mem);
5951        if (mem_entry != dev_data->memObjMap.end())
5952            mem_entry->second.image = VK_NULL_HANDLE;
5953
5954        // Remove image from imageMap
5955        dev_data->imageMap.erase(entry);
5956    }
5957    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5958    if (subEntry != dev_data->imageSubresourceMap.end()) {
5959        for (const auto& pair : subEntry->second) {
5960            dev_data->imageLayoutMap.erase(pair);
5961        }
5962        dev_data->imageSubresourceMap.erase(subEntry);
5963    }
5964    loader_platform_thread_unlock_mutex(&globalLock);
5965}
5966#if MTMERGESOURCE
5967VkBool32 print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5968                                  VkDebugReportObjectTypeEXT object_type) {
5969    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5970        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5971                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
5972                       other_handle);
5973    } else {
5974        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5975                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
5976                       other_handle);
5977    }
5978}
5979
5980VkBool32 validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5981                               VkDebugReportObjectTypeEXT object_type) {
5982    VkBool32 skip_call = false;
5983
5984    for (auto range : ranges) {
5985        if ((range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) <
5986            (new_range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5987            continue;
5988        if ((range.start & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)) >
5989            (new_range.end & ~(dev_data->physDevProperties.properties.limits.bufferImageGranularity - 1)))
5990            continue;
5991        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5992    }
5993    return skip_call;
5994}
5995
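// Worked example (assumed numbers): with bufferImageGranularity == 1024, the masks
// above round addresses down to 1024-byte "pages". A buffer at bytes [0, 700] and an
// image at [800, 2047] of the same allocation share page 0, so they are reported as
// aliased even though their byte ranges do not overlap; placing the image at offset
// 1024 instead would not be flagged.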
5996VkBool32 validate_buffer_image_aliasing(layer_data *dev_data, uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5997                                        VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges,
5998                                        const vector<MEMORY_RANGE> &other_ranges, VkDebugReportObjectTypeEXT object_type) {
5999    MEMORY_RANGE range;
6000    range.handle = handle;
6001    range.memory = mem;
6002    range.start = memoryOffset;
6003    range.end = memoryOffset + memRequirements.size - 1;
6004    ranges.push_back(range);
6005    return validate_memory_range(dev_data, other_ranges, range, object_type);
6006}
6007
6008VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6009vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
6010    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6011    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6012    loader_platform_thread_lock_mutex(&globalLock);
6013    // Track objects tied to memory
6014    uint64_t buffer_handle = (uint64_t)(buffer);
6015    VkBool32 skipCall =
6016        set_mem_binding(dev_data, device, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
6017    add_object_binding_info(dev_data, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
6018    {
6019        VkMemoryRequirements memRequirements;
6020        // MTMTODO : Shouldn't this call down the chain?
6021        vkGetBufferMemoryRequirements(device, buffer, &memRequirements);
6022        skipCall |= validate_buffer_image_aliasing(dev_data, buffer_handle, mem, memoryOffset, memRequirements,
6023                                                   dev_data->memObjMap[mem].bufferRanges, dev_data->memObjMap[mem].imageRanges,
6024                                                   VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
6025    }
6026    print_mem_list(dev_data, device);
6027    loader_platform_thread_unlock_mutex(&globalLock);
6028    if (VK_FALSE == skipCall) {
6029        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
6030    }
6031    return result;
6032}
6033
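// Illustrative sketch: binding a buffer and an image over the same bytes of one
// allocation is what the aliasing check above reports (hypothetical handles):
//
//     vkBindBufferMemory(device, buffer, mem, 0);
//     vkBindImageMemory(device, image, mem, 0); // overlaps the buffer -> MEMTRACK_INVALID_ALIASING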
6034VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6035vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
6036    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6037    // TODO : What to track here?
6038    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
6039    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
6040}
6041
6042VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6043vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
6044    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6045    // TODO : What to track here?
6046    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
6047    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
6048}
6049#endif
6050VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6051vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
6052    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6053        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
6054    // TODO : Clean up any internal data structures using this obj.
6055}
6056
6057VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6058vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
6059    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6060
6061    loader_platform_thread_lock_mutex(&globalLock);
6062
6063    my_data->shaderModuleMap.erase(shaderModule);
6064
6065    loader_platform_thread_unlock_mutex(&globalLock);
6066
6067    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
6068}
6069
6070VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6071vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
6072    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
6073    // TODO : Clean up any internal data structures using this obj.
6074}
6075
6076VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6077vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
6078    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6079        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
6080    // TODO : Clean up any internal data structures using this obj.
6081}
6082
6083VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6084vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
6085    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
6086    // TODO : Clean up any internal data structures using this obj.
6087}
6088
6089VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6090vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
6091    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6092        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
6093    // TODO : Clean up any internal data structures using this obj.
6094}
6095
6096VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6097vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
6098    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
6099        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
6100    // TODO : Clean up any internal data structures using this obj.
6101}
6102
6103VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6104vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
6105    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6106
6107    bool skip_call = false;
6108    loader_platform_thread_lock_mutex(&globalLock);
6109    for (uint32_t i = 0; i < commandBufferCount; i++) {
6110#if MTMERGESOURCE
6111        clear_cmd_buf_and_mem_references(dev_data, pCommandBuffers[i]);
6112#endif
6113        if (dev_data->globalInFlightCmdBuffers.count(pCommandBuffers[i])) {
6114            skip_call |=
6115                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6116                        reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6117                        "Attempt to free command buffer (%#" PRIxLEAST64 ") which is in use.",
6118                        reinterpret_cast<uint64_t>(pCommandBuffers[i]));
6119        }
6120        // Delete CB information structure, and remove from commandBufferMap
6121        auto cb = dev_data->commandBufferMap.find(pCommandBuffers[i]);
6122        if (cb != dev_data->commandBufferMap.end()) {
6123            // reset prior to delete for data clean-up
6124            resetCB(dev_data, (*cb).second->commandBuffer);
6125            delete (*cb).second;
6126            dev_data->commandBufferMap.erase(cb);
6127        }
6128
6129        // Remove commandBuffer reference from commandPoolMap
6130        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
6131    }
6132#if MTMERGESOURCE
6133    printCBList(dev_data, device);
6134#endif
6135    loader_platform_thread_unlock_mutex(&globalLock);
6136
6137    if (!skip_call)
6138        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6139}
6140
6141VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6142                                                                   const VkAllocationCallbacks *pAllocator,
6143                                                                   VkCommandPool *pCommandPool) {
6144    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6145
6146    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6147
6148    if (VK_SUCCESS == result) {
6149        loader_platform_thread_lock_mutex(&globalLock);
6150        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6151        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6152        loader_platform_thread_unlock_mutex(&globalLock);
6153    }
6154    return result;
6155}
6156
6157VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6158                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6159
6160    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6161    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6162    if (result == VK_SUCCESS) {
6163        loader_platform_thread_lock_mutex(&globalLock);
6164        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
6165        loader_platform_thread_unlock_mutex(&globalLock);
6166    }
6167    return result;
6168}
6169
6170VkBool32 validateCommandBuffersNotInUse(const layer_data *dev_data, VkCommandPool commandPool) {
6171    VkBool32 skipCall = VK_FALSE;
6172    auto pool_data = dev_data->commandPoolMap.find(commandPool);
6173    if (pool_data != dev_data->commandPoolMap.end()) {
6174        for (auto cmdBuffer : pool_data->second.commandBuffers) {
6175            if (dev_data->globalInFlightCmdBuffers.count(cmdBuffer)) {
6176                skipCall |=
6177                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT,
6178                            (uint64_t)(commandPool), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
6179                            "Cannot reset command pool %" PRIx64 " when allocated command buffer %" PRIx64 " is in use.",
6180                            (uint64_t)(commandPool), (uint64_t)(cmdBuffer));
6181            }
6182        }
6183    }
6184    return skipCall;
6185}
6186
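// Illustrative sketch: the in-use check above guards pool-wide operations; resetting
// (or destroying) a pool while one of its command buffers is still queued is flagged
// (hypothetical handles):
//
//     vkQueueSubmit(queue, 1, &si, VK_NULL_HANDLE);
//     vkResetCommandPool(device, pool, 0); // flagged: a CB from `pool` is in flight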
6187// Destroy commandPool along with all of the commandBuffers allocated from that pool
6188VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6189vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6190    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6191    bool commandBufferComplete = false;
6192    bool skipCall = false;
6193    loader_platform_thread_lock_mutex(&globalLock);
6194#if MTMERGESOURCE
6195    // Verify that command buffers in pool are complete (not in-flight)
6196    // MTMTODO : Merge this with code below (separate *NotInUse() call)
6197    for (auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6198         it != dev_data->commandPoolMap[commandPool].commandBuffers.end(); it++) {
6199        commandBufferComplete = VK_FALSE;
6200        skipCall |= checkCBCompleted(dev_data, *it, &commandBufferComplete);
6201        if (VK_FALSE == commandBufferComplete) {
6202            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6203                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6204                                "Destroying Command Pool 0x%" PRIxLEAST64 " before "
6205                                "its command buffer (0x%" PRIxLEAST64 ") has completed.",
6206                                (uint64_t)(commandPool), reinterpret_cast<uint64_t>(*it));
6207        }
6208    }
6209#endif
    // Check for in-flight CBs while the pool's tracking data still exists; running
    // this check after the pool's CB list has been emptied below would always pass.
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool)) {
        loader_platform_thread_unlock_mutex(&globalLock);
        return;
    }
6210    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
6211    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
6212        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6213             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
6214            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
6215            delete (*del_cb).second;                  // delete CB info structure
6216            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
6217            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
6218                poolCb); // Remove CB reference from commandPoolMap's list
6219        }
6220    }
6221    dev_data->commandPoolMap.erase(commandPool);
6222
6223    loader_platform_thread_unlock_mutex(&globalLock);
6227
6228    if (!skipCall)
6229        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
6230#if MTMERGESOURCE
6231    loader_platform_thread_lock_mutex(&globalLock);
6232    auto item = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6233    // Remove command buffers from command buffer map
6234    while (item != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6235        auto del_item = item++;
6236        delete_cmd_buf_info(dev_data, commandPool, *del_item);
6237    }
6238    dev_data->commandPoolMap.erase(commandPool);
6239    loader_platform_thread_unlock_mutex(&globalLock);
6240#endif
6241}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool commandBufferComplete = false;
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
#if MTMERGESOURCE
    // MTMTODO : Merge this with *NotInUse() call below
    loader_platform_thread_lock_mutex(&globalLock);
    auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
    // Verify that CB's in pool are complete (not in-flight)
    while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
        skipCall |= checkCBCompleted(dev_data, (*it), &commandBufferComplete);
        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)(*it), __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Resetting CB %p before it has completed. You must check CB "
                                "flag before calling vkResetCommandPool().",
                                (*it));
        } else {
            // Clear memory references at this point.
            clear_cmd_buf_and_mem_references(dev_data, (*it));
        }
        ++it;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_TRUE == validateCommandBuffersNotInUse(dev_data, commandPool))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
#if MTMERGESOURCE
        // Reset fence state in fenceCreateInfo structure
        // MTMTODO : Merge with code below
        auto fence_item = dev_data->fenceMap.find(pFences[i]);
        if (fence_item != dev_data->fenceMap.end()) {
            // Warn if the fence is already in the unsignaled state
            if (!(fence_item->second.createInfo.flags & VK_FENCE_CREATE_SIGNALED_BIT)) {
                // TODO: I don't see a Valid Usage section for ResetFences. This behavior should be documented there.
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                    (uint64_t)pFences[i], __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                    "Fence %#" PRIxLEAST64 " submitted to vkResetFences() in UNSIGNALED state",
                                    (uint64_t)pFences[i]);
            } else {
                fence_item->second.createInfo.flags =
                    static_cast<VkFenceCreateFlags>(fence_item->second.createInfo.flags & ~VK_FENCE_CREATE_SIGNALED_BIT);
            }
        }
#endif
        if (dev_data->fenceMap[pFences[i]].in_use.load()) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                        "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
    return result;
}
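
// Example (illustrative, not from the original source): per the tracking above, resetting a
// fence whose recorded createInfo no longer carries VK_FENCE_CREATE_SIGNALED_BIT (i.e. one
// that is already unsignaled as far as this layer knows) produces the
// MEMTRACK_INVALID_FENCE_STATE warning, since such a reset is a no-op:
//     vkResetFences(device, 1, &fence); // first reset clears the tracked SIGNALED bit
//     vkResetFences(device, 1, &fence); // second reset warns: fence already UNSIGNALED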

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
            }
        }
        delete[] fbNode->second.createInfo.pAttachments;
        dev_data->frameBufferMap.erase(fbNode);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->renderPassMap.erase(renderPass);
    loader_platform_thread_unlock_mutex(&globalLock);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, pCreateInfo);
#endif
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap[*pBuffer].create_info = unique_ptr<VkBufferCreateInfo>(new VkBufferCreateInfo(*pCreateInfo));
        dev_data->bufferMap[*pBuffer].in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
#if MTMERGESOURCE
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        validate_buffer_usage_flags(dev_data, device, pCreateInfo->buffer,
                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, VK_FALSE,
                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
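
// Example (illustrative, not from the original source; handles are hypothetical): a buffer
// intended for use with vkCreateBufferView() would be created with texel-buffer usage so
// that the usage check above passes:
//     VkBufferCreateInfo bufCI = {};
//     bufCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     bufCI.size = 4096;                                      // hypothetical size
//     bufCI.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // satisfies the check
//     bufCI.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//     vkCreateBuffer(device, &bufCI, nullptr, &buffer);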

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
        add_object_create_info(dev_data, (uint64_t)*pImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, pCreateInfo);
#endif
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects globalLock to be held by caller */

    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects globalLock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}
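
// Worked example (illustrative, not from the original source): for an image created with
// mipLevels = 10 and arrayLayers = 6, a caller-supplied range of
//     { aspectMask, /*baseMipLevel*/ 2, /*levelCount*/ VK_REMAINING_MIP_LEVELS,
//       /*baseArrayLayer*/ 1, /*layerCount*/ VK_REMAINING_ARRAY_LAYERS }
// resolves to levelCount = 8 (10 - 2) and layerCount = 5 (6 - 1) by either overload above.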

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
        dev_data->imageViewMap[*pView] = localCI;
#if MTMERGESOURCE
        // Validate that img has correct usage flags set
        validate_image_usage_flags(dev_data, device, pCreateInfo->image,
                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                   VK_FALSE, "vkCreateImageView()",
                                   "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        FENCE_NODE *pFN = &dev_data->fenceMap[*pFence];
#if MTMERGESOURCE
        memset(pFN, 0, sizeof(MT_FENCE_INFO));
        memcpy(&(pFN->createInfo), pCreateInfo, sizeof(VkFenceCreateInfo));
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            pFN->firstTimeFlag = VK_TRUE;
        }
#endif
        pFN->in_use.store(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}

// utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_NODE *pPipe) {
    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
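
// Note (illustrative, not from the original source): the range comparison above relies on
// VK_BLEND_FACTOR_CONSTANT_COLOR through VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA being a
// contiguous run of enum values covering the four constant-blend factors. For example, an
// attachment with
//     srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR
// falls inside that range, so blendConstantsEnabled is set; presumably this lets draw-time
// validation check that blend constants have been bound for the pipeline.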

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                          VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags setup during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    VkBool32 skipCall = VK_FALSE;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);

    for (i = 0; i < count; i++) {
        pPipeNode[i] = initGraphicsPipeline(dev_data, &pCreateInfos[i]);
        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                         VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    VkBool32 skipCall = VK_FALSE;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    loader_platform_thread_lock_mutex(&globalLock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (VK_FALSE == skipCall) {
        loader_platform_thread_unlock_mutex(&globalLock);
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        loader_platform_thread_lock_mutex(&globalLock);
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        LAYOUT_NODE *pNewNode = new LAYOUT_NODE;
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        (uint64_t)*pSetLayout, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate LAYOUT_NODE in vkCreateDescriptorSetLayout()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        }
        memcpy((void *)&pNewNode->createInfo, pCreateInfo, sizeof(VkDescriptorSetLayoutCreateInfo));
        pNewNode->createInfo.pBindings = new VkDescriptorSetLayoutBinding[pCreateInfo->bindingCount];
        memcpy((void *)pNewNode->createInfo.pBindings, pCreateInfo->pBindings,
               sizeof(VkDescriptorSetLayoutBinding) * pCreateInfo->bindingCount);
        // g++ does not like reserve with size 0
        if (pCreateInfo->bindingCount)
            pNewNode->bindingToIndexMap.reserve(pCreateInfo->bindingCount);
        uint32_t totalCount = 0;
        for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
            // emplace() fails if the binding number was already seen; binding numbers must be unique
            if (!pNewNode->bindingToIndexMap.emplace(pCreateInfo->pBindings[i].binding, i).second) {
                if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)*pSetLayout, __LINE__,
                            DRAWSTATE_INVALID_LAYOUT, "DS",
                            "duplicated binding number in VkDescriptorSetLayoutBinding"))
                    return VK_ERROR_VALIDATION_FAILED_EXT;
            }
            totalCount += pCreateInfo->pBindings[i].descriptorCount;
            if (pCreateInfo->pBindings[i].pImmutableSamplers) {
                VkSampler **ppIS = (VkSampler **)&pNewNode->createInfo.pBindings[i].pImmutableSamplers;
                *ppIS = new VkSampler[pCreateInfo->pBindings[i].descriptorCount];
                memcpy(*ppIS, pCreateInfo->pBindings[i].pImmutableSamplers,
                       pCreateInfo->pBindings[i].descriptorCount * sizeof(VkSampler));
            }
        }
        pNewNode->layout = *pSetLayout;
        pNewNode->startIndex = 0;
        if (totalCount > 0) {
            pNewNode->descriptorTypes.resize(totalCount);
            pNewNode->stageFlags.resize(totalCount);
            uint32_t offset = 0;
            uint32_t j = 0;
            VkDescriptorType dType;
            for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
                dType = pCreateInfo->pBindings[i].descriptorType;
                for (j = 0; j < pCreateInfo->pBindings[i].descriptorCount; j++) {
                    pNewNode->descriptorTypes[offset + j] = dType;
                    pNewNode->stageFlags[offset + j] = pCreateInfo->pBindings[i].stageFlags;
                    if ((dType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                        (dType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                        pNewNode->dynamicDescriptorCount++;
                    }
                }
                offset += j;
            }
            pNewNode->endIndex = pNewNode->startIndex + totalCount - 1;
        } else { // no descriptors
            pNewNode->endIndex = 0;
        }
        // Add the new layout node to the global layout map
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] = pNewNode;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
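
// Worked example (illustrative, not from the original source): a layout with two bindings,
//     binding 0: descriptorType UNIFORM_BUFFER,         descriptorCount 3
//     binding 1: descriptorType UNIFORM_BUFFER_DYNAMIC, descriptorCount 2
// flattens above into totalCount = 5 entries, with descriptorTypes[0..2] = UNIFORM_BUFFER,
// descriptorTypes[3..4] = UNIFORM_BUFFER_DYNAMIC, dynamicDescriptorCount = 2, and
// startIndex/endIndex = 0/4 for the node.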

static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                     const char *caller_name) {
    bool skipCall = false;
    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                 "exceeds this device's maxPushConstantsSize of %u.",
                           caller_name, offset, size, dev_data->physDevProperties.properties.limits.maxPushConstantsSize);
    }
    return skipCall;
}

VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        i, pCreateInfo->pPushConstantRanges[i].size);
        }
        // TODO : Add warning if ranges overlap
    }
    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        // TODOSC : Merge capture of the setLayouts per pipeline
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
        }
        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}
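
// Worked example (illustrative, not from the original source): with a device limit of
// maxPushConstantsSize = 128, the range
//     VkPushConstantRange{ VK_SHADER_STAGE_VERTEX_BIT, /*offset*/ 0, /*size*/ 64 }
// passes both checks above (64 > 0, 64 % 4 == 0, and 0 + 64 <= 128), while size = 6 would
// trigger the multiple-of-4 error and offset = 120 with size = 16 would exceed the limit.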

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                       VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Insert this pool into Global Pool LL at head
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            loader_platform_thread_lock_mutex(&globalLock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
            loader_platform_thread_unlock_mutex(&globalLock);
        }
    } else {
        // Need to do anything if pool create fails?
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                // Create new set node and add to head of pool nodes
                SET_NODE *pNewNode = new SET_NODE;
                if (NULL == pNewNode) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS",
                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
                        loader_platform_thread_unlock_mutex(&globalLock);
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                } else {
                    // TODO : Pool should store a total count of each type of Descriptor available
                    //  When descriptors are allocated, decrement the count and validate here
                    //  that the count doesn't go below 0. On reset/free, bump the count back up.
                    // Insert set at head of Set LL for this pool
                    pNewNode->pNext = pPoolNode->pSets;
                    pNewNode->in_use.store(0);
                    pPoolNode->pSets = pNewNode;
                    LAYOUT_NODE *pLayout = getLayoutNode(dev_data, pAllocateInfo->pSetLayouts[i]);
                    if (NULL == pLayout) {
                        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
                                    __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                                    "Unable to find set layout node for layout %#" PRIxLEAST64
                                    " specified in vkAllocateDescriptorSets() call",
                                    (uint64_t)pAllocateInfo->pSetLayouts[i])) {
                            loader_platform_thread_unlock_mutex(&globalLock);
                            return VK_ERROR_VALIDATION_FAILED_EXT;
                        }
                    }
                    pNewNode->pLayout = pLayout;
                    pNewNode->pool = pAllocateInfo->descriptorPool;
                    pNewNode->set = pDescriptorSets[i];
                    pNewNode->descriptorCount = (pLayout->createInfo.bindingCount != 0) ? pLayout->endIndex + 1 : 0;
                    if (pNewNode->descriptorCount) {
                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
                    }
                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if ((VK_SUCCESS == result) && pPoolNode) {
        loader_platform_thread_lock_mutex(&globalLock);

        // Update available descriptor sets in pool
        pPoolNode->availableSets += count;

        // For each freed descriptor add it back into the pool as available
        for (uint32_t i = 0; i < count; ++i) {
            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
            invalidateBoundCmdBuffers(dev_data, pSet);
            LAYOUT_NODE *pLayout = pSet->pLayout;
            uint32_t typeIndex = 0, poolSizeCount = 0;
            for (uint32_t j = 0; j < pLayout->createInfo.bindingCount; ++j) {
                typeIndex = static_cast<uint32_t>(pLayout->createInfo.pBindings[j].descriptorType);
                poolSizeCount = pLayout->createInfo.pBindings[j].descriptorCount;
                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    // TODO : Any other clean-up or book-keeping to do here?
    return result;
}
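
// Worked example (illustrative, not from the original source): freeing one set whose layout
// has a single binding of descriptorType COMBINED_IMAGE_SAMPLER with descriptorCount = 4
// returns 1 to pPoolNode->availableSets and 4 to
// availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER], making them
// allocatable again from the pool.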

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // dsUpdate will return VK_TRUE only if a bailout error occurs, so we only call down the chain when update returns VK_FALSE
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    VkBool32 rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!rtn) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&globalLock);
        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
        if (cp_it != dev_data->commandPoolMap.end()) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
#if MTMERGESOURCE
        printCBList(dev_data, device);
#endif
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    // Validate command buffer level
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
#if MTMERGESOURCE
        bool commandBufferComplete = false;
        // MTMTODO : Merge this with code below
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);

        if (!commandBufferComplete) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                                "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
                                "You must check CB flag before this call.",
                                commandBuffer);
        }
#endif
        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
                            reinterpret_cast<void *>(commandBuffer));
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must specify a valid renderpass parameter.",
                            reinterpret_cast<void *>(commandBuffer));
                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffer (%p) may perform better if a "
                                                  "valid framebuffer parameter is specified.",
                                            reinterpret_cast<void *>(commandBuffer));
                    } else {
                        string errorString = "";
                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
                        if (fbNode != dev_data->frameBufferMap.end()) {
                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
                                // renderPass that framebuffer was created with must be compatible with local renderPass
                                skipCall |=
                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
                            }
                            // Connect this framebuffer to this cmdBuffer
                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
                        }
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                     dev_data->physDevProperties.features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is disabled or the device does not "
                                        "support precise occlusion queries.",
                                        reinterpret_cast<void *>(commandBuffer));
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have a subpass index (%d) "
                                            "that is less than the number of subpasses (%d).",
                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
                    }
                }
            }
        }
        if (CB_RECORDING == pCB->state) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        (uint64_t)commandBuffer);
        } else if (CB_RECORDED == pCB->state) {
            VkCommandPool cmdPool = pCB->createInfo.commandPool;
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        pCB->state = CB_RECORDING;
        pCB->beginInfo = *pBeginInfo;
        if (pCB->beginInfo.pInheritanceInfo) {
            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
        }
    } else {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "In vkBeginCommandBuffer() and unable to find CommandBuffer Node for CB %p!", (void *)commandBuffer);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE != skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    return result;
}
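
// Example (illustrative, not from the original source; handles are hypothetical): beginning a
// secondary command buffer that will execute entirely inside a render pass, satisfying the
// checks above (non-null pInheritanceInfo, a valid renderPass, an in-range subpass, and a
// framebuffer supplied for best performance):
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = renderPass;   // must be compatible with framebuffer's render pass
//     inherit.subpass = 0;               // must be < subpassCount of renderPass
//     inherit.framebuffer = framebuffer; // optional, but avoids the perf warning
//     VkCommandBufferBeginInfo beginInfo = {};
//     beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     beginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     beginInfo.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondaryCB, &beginInfo);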
7109
7110VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
7111    VkBool32 skipCall = VK_FALSE;
7112    VkResult result = VK_SUCCESS;
7113    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7114    loader_platform_thread_lock_mutex(&globalLock);
7115    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7116    if (pCB) {
7117        if (pCB->state != CB_RECORDING) {
7118            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
7119        }
7120        for (auto query : pCB->activeQueries) {
7121            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7122                                DRAWSTATE_INVALID_QUERY, "DS",
7123                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
7124                                (uint64_t)(query.pool), query.index);
7125        }
7126    }
7127    if (VK_FALSE == skipCall) {
7128        loader_platform_thread_unlock_mutex(&globalLock);
7129        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
7130        loader_platform_thread_lock_mutex(&globalLock);
7131        if (VK_SUCCESS == result) {
7132            pCB->state = CB_RECORDED;
7133            // Reset CB status flags
7134            pCB->status = 0;
7135            printCB(dev_data, commandBuffer);
7136        }
7137    } else {
7138        result = VK_ERROR_VALIDATION_FAILED_EXT;
7139    }
7140    loader_platform_thread_unlock_mutex(&globalLock);
7141    return result;
7142}
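
// Example (application-side sketch): every query begun in a command buffer must
// be ended before vkEndCommandBuffer(), or the activeQueries check above fires.
// 'cmd_buf' and 'query_pool' are hypothetical handles.
//
//     vkCmdBeginQuery(cmd_buf, query_pool, 0 /*query index*/, 0 /*flags*/);
//     // ... draw commands being measured ...
//     vkCmdEndQuery(cmd_buf, query_pool, 0);
//     vkEndCommandBuffer(cmd_buf);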
7143
7144VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
7145vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7146    VkBool32 skipCall = VK_FALSE;
7147    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7148    loader_platform_thread_lock_mutex(&globalLock);
7149#if MTMERGESOURCE
7150    bool commandBufferComplete = false;
7151    // Verify that CB is complete (not in-flight)
7152    skipCall = checkCBCompleted(dev_data, commandBuffer, &commandBufferComplete);
7153    if (!commandBufferComplete) {
7154        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7155                            (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
7156                            "Resetting CB %p before it has completed. You must verify that the CB "
7157                            "has completed (e.g. by checking its fence) before calling vkResetCommandBuffer().",
7158                            commandBuffer);
7159    }
7160    // Clear memory references at this point.
7161    clear_cmd_buf_and_mem_references(dev_data, commandBuffer);
7162#endif
7163    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7164    VkCommandPool cmdPool = pCB ? pCB->createInfo.commandPool : VK_NULL_HANDLE; // guard against an unknown CB
7165    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
7166        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7167                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7168                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
7169                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set.",
7170                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7171    }
7172    if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
7173        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7174                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7175                            "Attempt to reset command buffer (%#" PRIxLEAST64 ") which is in use.",
7176                            reinterpret_cast<uint64_t>(commandBuffer));
7177    }
7178    loader_platform_thread_unlock_mutex(&globalLock);
7179    if (skipCall != VK_FALSE)
7180        return VK_ERROR_VALIDATION_FAILED_EXT;
7181    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
7182    if (VK_SUCCESS == result) {
7183        loader_platform_thread_lock_mutex(&globalLock);
7184        resetCB(dev_data, commandBuffer);
7185        loader_platform_thread_unlock_mutex(&globalLock);
7186    }
7187    return result;
7188}
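
// Example (application-side sketch): explicit per-buffer resets require the
// pool to opt in at creation time; otherwise the check above reports
// DRAWSTATE_INVALID_COMMAND_BUFFER_RESET. 'device' and 'queue_family' are
// hypothetical application objects.
//
//     VkCommandPoolCreateInfo pool_ci = {};
//     pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     pool_ci.queueFamilyIndex = queue_family;
//     VkCommandPool pool;
//     vkCreateCommandPool(device, &pool_ci, nullptr, &pool);
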
7189#if MTMERGESOURCE
7190// TODO : For any vkCmdBind* calls that include an object which has memory bound to it,
7191//    we need to account for that memory now being bound to the given commandBuffer
7192#endif
7193VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7194vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7195    VkBool32 skipCall = VK_FALSE;
7196    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7197    loader_platform_thread_lock_mutex(&globalLock);
7198    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7199    if (pCB) {
7200        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7201        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7202            skipCall |=
7203                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7204                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7205                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
7206                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
7207        }
7208
7209        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7210        if (pPN) {
7211            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
7212            set_cb_pso_status(pCB, pPN);
7213            set_pipeline_state(pPN);
7214            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
7215        } else {
7216            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7217                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7218                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7219        }
7220    }
7221    loader_platform_thread_unlock_mutex(&globalLock);
7222    if (VK_FALSE == skipCall)
7223        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7224}
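
// Example (application-side sketch): compute pipelines may not be bound while a
// render pass is active, which is the DRAWSTATE_INVALID_RENDERPASS_CMD case
// above. 'cmd_buf' and 'compute_pipeline' are hypothetical handles.
//
//     vkCmdEndRenderPass(cmd_buf); // leave the render pass first
//     vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);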
7225
7226VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7227vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7228    VkBool32 skipCall = VK_FALSE;
7229    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7230    loader_platform_thread_lock_mutex(&globalLock);
7231    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7232    if (pCB) {
7233        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
7234        pCB->status |= CBSTATUS_VIEWPORT_SET;
7235        pCB->viewports.resize(viewportCount);
7236        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
7237    }
7238    loader_platform_thread_unlock_mutex(&globalLock);
7239    if (VK_FALSE == skipCall)
7240        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7241}
7242
7243VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7244vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7245    VkBool32 skipCall = VK_FALSE;
7246    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7247    loader_platform_thread_lock_mutex(&globalLock);
7248    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7249    if (pCB) {
7250        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7251        pCB->status |= CBSTATUS_SCISSOR_SET;
7252        pCB->scissors.resize(scissorCount);
7253        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
7254    }
7255    loader_platform_thread_unlock_mutex(&globalLock);
7256    if (VK_FALSE == skipCall)
7257        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7258}
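
// Example (application-side sketch): when a pipeline declares
// VK_DYNAMIC_STATE_VIEWPORT / VK_DYNAMIC_STATE_SCISSOR, the application must
// set both before drawing; the CBSTATUS_* bits recorded above are how this
// layer tracks that. 'cmd_buf' and the dimensions are hypothetical.
//
//     VkViewport vp = {0.0f, 0.0f, 640.0f, 480.0f, 0.0f, 1.0f};
//     VkRect2D sc = {{0, 0}, {640, 480}};
//     vkCmdSetViewport(cmd_buf, 0, 1, &vp);
//     vkCmdSetScissor(cmd_buf, 0, 1, &sc);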
7259
7260VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7261    VkBool32 skipCall = VK_FALSE;
7262    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7263    loader_platform_thread_lock_mutex(&globalLock);
7264    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7265    if (pCB) {
7266        skipCall |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7267        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7268    }
7269    loader_platform_thread_unlock_mutex(&globalLock);
7270    if (VK_FALSE == skipCall)
7271        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
7272}
7273
7274VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7275vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7276    VkBool32 skipCall = VK_FALSE;
7277    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7278    loader_platform_thread_lock_mutex(&globalLock);
7279    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7280    if (pCB) {
7281        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7282        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7283    }
7284    loader_platform_thread_unlock_mutex(&globalLock);
7285    if (VK_FALSE == skipCall)
7286        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
7287                                                         depthBiasSlopeFactor);
7288}
7289
7290VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7291    VkBool32 skipCall = VK_FALSE;
7292    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7293    loader_platform_thread_lock_mutex(&globalLock);
7294    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7295    if (pCB) {
7296        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7297        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7298    }
7299    loader_platform_thread_unlock_mutex(&globalLock);
7300    if (VK_FALSE == skipCall)
7301        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
7302}
7303
7304VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7305vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7306    VkBool32 skipCall = VK_FALSE;
7307    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7308    loader_platform_thread_lock_mutex(&globalLock);
7309    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7310    if (pCB) {
7311        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7312        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7313    }
7314    loader_platform_thread_unlock_mutex(&globalLock);
7315    if (VK_FALSE == skipCall)
7316        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7317}
7318
7319VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7320vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7321    VkBool32 skipCall = VK_FALSE;
7322    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7323    loader_platform_thread_lock_mutex(&globalLock);
7324    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7325    if (pCB) {
7326        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7327        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7328    }
7329    loader_platform_thread_unlock_mutex(&globalLock);
7330    if (VK_FALSE == skipCall)
7331        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7332}
7333
7334VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7335vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7336    VkBool32 skipCall = VK_FALSE;
7337    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7338    loader_platform_thread_lock_mutex(&globalLock);
7339    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7340    if (pCB) {
7341        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7342        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7343    }
7344    loader_platform_thread_unlock_mutex(&globalLock);
7345    if (VK_FALSE == skipCall)
7346        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7347}
7348
7349VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7350vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7351    VkBool32 skipCall = VK_FALSE;
7352    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7353    loader_platform_thread_lock_mutex(&globalLock);
7354    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7355    if (pCB) {
7356        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7357        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7358    }
7359    loader_platform_thread_unlock_mutex(&globalLock);
7360    if (VK_FALSE == skipCall)
7361        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
7362}
7363
7364VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7365vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
7366                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
7367                        const uint32_t *pDynamicOffsets) {
7368    VkBool32 skipCall = VK_FALSE;
7369    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7370    loader_platform_thread_lock_mutex(&globalLock);
7371    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7372    if (pCB) {
7373        if (pCB->state == CB_RECORDING) {
7374            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7375            uint32_t totalDynamicDescriptors = 0;
7376            string errorString = "";
7377            uint32_t lastSetIndex = firstSet + setCount - 1;
7378            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7379                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7380            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7381            for (uint32_t i = 0; i < setCount; i++) {
7382                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7383                if (pSet) {
7384                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7385                    pSet->boundCmdBuffers.insert(commandBuffer);
7386                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7387                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7388                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7389                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7390                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7391                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7392                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7393                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7394                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7395                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7396                                            "DS %#" PRIxLEAST64
7397                                            " bound but it was never updated. You may want to either update it or not bind it.",
7398                                            (uint64_t)pDescriptorSets[i]);
7399                    }
7400                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7401                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7402                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7403                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7404                                            __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7405                                            "descriptorSet #%u being bound is not compatible with overlapping layout in "
7406                                            "pipelineLayout due to: %s",
7407                                            i + firstSet, errorString.c_str());
7408                    }
7409                    if (pSet->pLayout->dynamicDescriptorCount) {
7410                        // First make sure we won't overstep bounds of pDynamicOffsets array
7411                        if ((totalDynamicDescriptors + pSet->pLayout->dynamicDescriptorCount) > dynamicOffsetCount) {
7412                            skipCall |=
7413                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7414                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7415                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7416                                        "descriptorSet #%u (%#" PRIxLEAST64
7417                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7418                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7419                                        i, (uint64_t)pDescriptorSets[i], pSet->pLayout->dynamicDescriptorCount,
7420                                        (dynamicOffsetCount - totalDynamicDescriptors));
7421                        } else { // Validate and store dynamic offsets with the set
7422                            // Validate Dynamic Offset Minimums
7423                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7424                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7425                                if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7426                                    if (vk_safe_modulo(
7427                                            pDynamicOffsets[cur_dyn_offset],
7428                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment) !=
7429                                        0) {
7430                                        skipCall |= log_msg(
7431                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7432                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7433                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7434                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7435                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7436                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7437                                            dev_data->physDevProperties.properties.limits.minUniformBufferOffsetAlignment);
7438                                    }
7439                                    cur_dyn_offset++;
7440                                } else if (pSet->pLayout->descriptorTypes[d] == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7441                                    if (vk_safe_modulo(
7442                                            pDynamicOffsets[cur_dyn_offset],
7443                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment) !=
7444                                        0) {
7445                                        skipCall |= log_msg(
7446                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7447                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7448                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7449                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7450                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7451                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7452                                            dev_data->physDevProperties.properties.limits.minStorageBufferOffsetAlignment);
7453                                    }
7454                                    cur_dyn_offset++;
7455                                }
7456                            }
7457                            // Keep running total of dynamic descriptor count to verify at the end
7458                            totalDynamicDescriptors += pSet->pLayout->dynamicDescriptorCount;
7459                        }
7460                    }
7461                } else {
7462                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7463                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7464                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7465                                        (uint64_t)pDescriptorSets[i]);
7466                }
7467                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7468                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7469                if (firstSet > 0) { // Check set #s below the first bound set
7470                    for (uint32_t i = 0; i < firstSet; ++i) {
7471                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7472                            !verify_set_layout_compatibility(
7473                                dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7474                                errorString)) {
7475                            skipCall |= log_msg(
7476                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7477                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7478                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7479                                "DescriptorSet %#" PRIxLEAST64
7480                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7481                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7482                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7483                        }
7484                    }
7485                }
7486                // Check if newly last bound set invalidates any remaining bound sets
7487                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7488                    if (oldFinalBoundSet &&
7489                        !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7490                                                         errorString)) {
7491                        skipCall |=
7492                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7493                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7494                                    DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
7495                                                          " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7496                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7497                                                          "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7498                                    (uint64_t)oldFinalBoundSet, lastSetIndex,
7499                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7500                                    lastSetIndex + 1, (uint64_t)layout);
7501                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7502                    }
7503                }
7504            }
7505            // Save dynamicOffsets bound to this bind point (once, outside the per-set loop, to avoid duplicates)
7506            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7507                pCB->lastBound[pipelineBindPoint].dynamicOffsets.push_back(pDynamicOffsets[i]);
7508            }
7509            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7510            if (totalDynamicDescriptors != dynamicOffsetCount) {
7511                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7512                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7513                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7514                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7515                                    "is %u. It should exactly match the number of dynamic descriptors.",
7516                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7517            }
7518            // Save dynamicOffsets bound to this CB
7519            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7520                pCB->dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7521            }
7522        } else {
7523            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7524        }
7525    }
7526    loader_platform_thread_unlock_mutex(&globalLock);
7527    if (VK_FALSE == skipCall)
7528        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7529                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7530}
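
// Example (application-side sketch): one dynamic offset must be supplied per
// dynamic descriptor in the bound sets, and each offset must honor the device
// alignment limits checked above. 'cmd_buf', 'limits', 'raw_offset', 'layout',
// and 'set' are hypothetical application objects.
//
//     VkDeviceSize align = limits.minUniformBufferOffsetAlignment;
//     uint32_t dyn_offset = (uint32_t)((raw_offset + align - 1) & ~(align - 1)); // round up
//     vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
//                             0 /*firstSet*/, 1, &set, 1 /*dynamicOffsetCount*/,
//                             &dyn_offset);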
7531
7532VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7533vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7534    VkBool32 skipCall = VK_FALSE;
7535    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7536    loader_platform_thread_lock_mutex(&globalLock);
7537#if MTMERGESOURCE
7538    VkDeviceMemory mem;
7539    skipCall =
7540        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7541    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7542    if (cb_data != dev_data->commandBufferMap.end()) {
7543        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7544        cb_data->second->validate_functions.push_back(function);
7545    }
7546    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7547#endif
7548    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7549    if (pCB) {
7550        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7551        VkDeviceSize offset_align = 0;
7552        switch (indexType) {
7553        case VK_INDEX_TYPE_UINT16:
7554            offset_align = 2;
7555            break;
7556        case VK_INDEX_TYPE_UINT32:
7557            offset_align = 4;
7558            break;
7559        default:
7560            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7561            break;
7562        }
7563        if (!offset_align || (offset % offset_align)) {
7564            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7565                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7566                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7567                                offset, string_VkIndexType(indexType));
7568        }
7569        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7570    }
7571    loader_platform_thread_unlock_mutex(&globalLock);
7572    if (VK_FALSE == skipCall)
7573        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7574}
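
// Example (application-side sketch): the bind offset must be aligned to the
// index size (2 bytes for UINT16, 4 bytes for UINT32), which is the alignment
// rule enforced above. 'cmd_buf' and 'index_buffer' are hypothetical handles.
//
//     vkCmdBindIndexBuffer(cmd_buf, index_buffer, 0 /*offset, 2-byte aligned*/,
//                          VK_INDEX_TYPE_UINT16);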
7575
7576void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7577    uint32_t end = firstBinding + bindingCount;
7578    if (pCB->currentDrawData.buffers.size() < end) {
7579        pCB->currentDrawData.buffers.resize(end);
7580    }
7581    for (uint32_t i = 0; i < bindingCount; ++i) {
7582        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7583    }
7584}
7585
7586void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7587
7588VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7589                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7590                                                                  const VkDeviceSize *pOffsets) {
7591    VkBool32 skipCall = VK_FALSE;
7592    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7593    loader_platform_thread_lock_mutex(&globalLock);
7594#if MTMERGESOURCE
7595    for (uint32_t i = 0; i < bindingCount; ++i) {
7596        VkDeviceMemory mem;
7597        skipCall |= get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)(pBuffers[i]),
7598                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7599        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7600        if (cb_data != dev_data->commandBufferMap.end()) {
7601            std::function<VkBool32()> function =
7602                [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7603            cb_data->second->validate_functions.push_back(function);
7604        }
7605    }
7606    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7607#endif
7608    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7609    if (pCB) {
7610        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7611        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7612    } else {
7613        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7614    }
7615    loader_platform_thread_unlock_mutex(&globalLock);
7616    if (VK_FALSE == skipCall)
7617        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7618}
7619
7620/* expects globalLock to be held by caller */
7621bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7622    bool skip_call = false;
7623
7624    for (auto imageView : pCB->updateImages) {
7625        auto iv_data = dev_data->imageViewMap.find(imageView);
7626        if (iv_data == dev_data->imageViewMap.end())
7627            continue;
7628        VkImage image = iv_data->second.image;
7629        VkDeviceMemory mem;
7630        skip_call |=
7631            get_mem_binding_from_object(dev_data, pCB->commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7632        std::function<VkBool32()> function = [=]() {
7633            set_memory_valid(dev_data, mem, true, image);
7634            return VK_FALSE;
7635        };
7636        pCB->validate_functions.push_back(function);
7637    }
7638    for (auto buffer : pCB->updateBuffers) {
7639        VkDeviceMemory mem;
7640        skip_call |= get_mem_binding_from_object(dev_data, pCB->commandBuffer, (uint64_t)buffer,
7641                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7642        std::function<VkBool32()> function = [=]() {
7643            set_memory_valid(dev_data, mem, true);
7644            return VK_FALSE;
7645        };
7646        pCB->validate_functions.push_back(function);
7647    }
7648    return skip_call;
7649}
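
// A minimal sketch (an assumption, not the actual submit path in this file) of
// how the deferred validators queued above might be drained at queue-submit
// time:
//
//     VkBool32 skip = VK_FALSE;
//     for (auto &validate : pCB->validate_functions)
//         skip |= validate(); // each lambda re-checks or updates memory validity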
7650
7651VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7652                                                     uint32_t firstVertex, uint32_t firstInstance) {
7653    VkBool32 skipCall = VK_FALSE;
7654    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7655    loader_platform_thread_lock_mutex(&globalLock);
7656    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7657    if (pCB) {
7658        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7659        pCB->drawCount[DRAW]++;
7660        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7661        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7662        // TODO : Need to pass commandBuffer as srcObj here
7663        skipCall |=
7664            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7665                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7666        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7667        if (VK_FALSE == skipCall) {
7668            updateResourceTrackingOnDraw(pCB);
7669        }
7670        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7671    }
7672    loader_platform_thread_unlock_mutex(&globalLock);
7673    if (VK_FALSE == skipCall)
7674        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7675}
7676
7677VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7678                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7679                                                            uint32_t firstInstance) {
7680    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7681    VkBool32 skipCall = VK_FALSE;
7682    loader_platform_thread_lock_mutex(&globalLock);
7683    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7684    if (pCB) {
7685        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7686        pCB->drawCount[DRAW_INDEXED]++;
7687        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_TRUE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7688        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7689        // TODO : Need to pass commandBuffer as srcObj here
7690        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7691                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7692                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7693        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7694        if (VK_FALSE == skipCall) {
7695            updateResourceTrackingOnDraw(pCB);
7696        }
7697        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7698    }
7699    loader_platform_thread_unlock_mutex(&globalLock);
7700    if (VK_FALSE == skipCall)
7701        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7702                                                        firstInstance);
7703}
7704
7705VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7706vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7707    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7708    VkBool32 skipCall = VK_FALSE;
7709    loader_platform_thread_lock_mutex(&globalLock);
7710#if MTMERGESOURCE
7711    VkDeviceMemory mem;
7712    // MTMTODO : merge with code below
7713    skipCall =
7714        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7715    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7716#endif
7717    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7718    if (pCB) {
7719        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7720        pCB->drawCount[DRAW_INDIRECT]++;
7721        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7722        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7723        // TODO : Need to pass commandBuffer as srcObj here
7724        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7725                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7726                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7727        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7728        if (VK_FALSE == skipCall) {
7729            updateResourceTrackingOnDraw(pCB);
7730        }
7731        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7732    }
7733    loader_platform_thread_unlock_mutex(&globalLock);
7734    if (VK_FALSE == skipCall)
7735        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7736}
7737
7738VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7739vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7740    VkBool32 skipCall = VK_FALSE;
7741    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7742    loader_platform_thread_lock_mutex(&globalLock);
7743#if MTMERGESOURCE
7744    VkDeviceMemory mem;
7745    // MTMTODO : merge with code below
7746    skipCall =
7747        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7748    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7749#endif
7750    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7751    if (pCB) {
7752        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7753        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7754        skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_TRUE, VK_PIPELINE_BIND_POINT_GRAPHICS);
7755        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7756        // TODO : Need to pass commandBuffer as srcObj here
7757        skipCall |=
7758            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7759                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7760                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7761        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7762        if (VK_FALSE == skipCall) {
7763            updateResourceTrackingOnDraw(pCB);
7764        }
7765        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7766    }
7767    loader_platform_thread_unlock_mutex(&globalLock);
7768    if (VK_FALSE == skipCall)
7769        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7770}
7771
7772VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7773    VkBool32 skipCall = VK_FALSE;
7774    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7775    loader_platform_thread_lock_mutex(&globalLock);
7776    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7777    if (pCB) {
7778        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7779        // skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_COMPUTE);
7780        // TODO : Call below is temporary until call above can be re-enabled
7781        update_shader_storage_images_and_buffers(dev_data, pCB);
7782        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7783        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7784        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7785    }
7786    loader_platform_thread_unlock_mutex(&globalLock);
7787    if (VK_FALSE == skipCall)
7788        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7789}
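
// Example (application-side sketch): dispatches are invalid inside a render
// pass (the insideRenderPass() check above), so compute work is recorded
// before vkCmdBeginRenderPass() or after vkCmdEndRenderPass(). 'cmd_buf' and
// 'compute_pipeline' are hypothetical handles.
//
//     vkCmdBindPipeline(cmd_buf, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);
//     vkCmdDispatch(cmd_buf, 64, 64, 1); // outside any render pass instance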
7790
7791VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7792vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7793    VkBool32 skipCall = VK_FALSE;
7794    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7795    loader_platform_thread_lock_mutex(&globalLock);
7796#if MTMERGESOURCE
7797    VkDeviceMemory mem;
7798    skipCall =
7799        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7800    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7801#endif
7802    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7803    if (pCB) {
7804        // TODO : Re-enable validate_and_update_draw_state() when it supports compute shaders
7805        // skipCall |= validate_and_update_draw_state(dev_data, pCB, VK_FALSE, VK_PIPELINE_BIND_POINT_COMPUTE);
7806        // TODO : Call below is temporary until call above can be re-enabled
7807        update_shader_storage_images_and_buffers(dev_data, pCB);
7808        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7809        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7810        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7811    }
7812    loader_platform_thread_unlock_mutex(&globalLock);
7813    if (VK_FALSE == skipCall)
7814        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7815}
7816
7817VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7818                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7819    VkBool32 skipCall = VK_FALSE;
7820    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7821    loader_platform_thread_lock_mutex(&globalLock);
7822#if MTMERGESOURCE
7823    VkDeviceMemory mem;
7824    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7825    skipCall =
7826        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7827    if (cb_data != dev_data->commandBufferMap.end()) {
7828        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7829        cb_data->second->validate_functions.push_back(function);
7830    }
7831    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7832    skipCall |=
7833        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7834    if (cb_data != dev_data->commandBufferMap.end()) {
7835        std::function<VkBool32()> function = [=]() {
7836            set_memory_valid(dev_data, mem, true);
7837            return VK_FALSE;
7838        };
7839        cb_data->second->validate_functions.push_back(function);
7840    }
7841    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7842    // Validate that SRC & DST buffers have correct usage flags set
7843    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7844                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7845    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7846                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7847#endif
7848    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7849    if (pCB) {
7850        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7851        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7852    }
7853    loader_platform_thread_unlock_mutex(&globalLock);
7854    if (VK_FALSE == skipCall)
7855        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7856}
7857
7858VkBool32 VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7859                                 VkImageLayout srcImageLayout) {
7860    VkBool32 skip_call = VK_FALSE;
7861
7862    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7863    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7864    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7865        uint32_t layer = i + subLayers.baseArrayLayer;
7866        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7867        IMAGE_CMD_BUF_LAYOUT_NODE node;
7868        if (!FindLayout(pCB, srcImage, sub, node)) {
7869            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7870            continue;
7871        }
7872        if (node.layout != srcImageLayout) {
7873            // TODO: Improve log message in the next pass
7874            skip_call |=
7875                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7876                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image with source layout %s "
7877                                                                        "when its current (tracked) layout is %s.",
7878                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7879        }
7880    }
7881    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7882        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7883            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7884            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7885                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7886                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7887        } else {
7888            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7889                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7890                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7891                                 string_VkImageLayout(srcImageLayout));
7892        }
7893    }
7894    return skip_call;
7895}
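
// Example (application-side sketch): transitioning an image to
// TRANSFER_SRC_OPTIMAL with a pipeline barrier before copying from it avoids
// both the layout-mismatch error and the GENERAL-layout perf warning above.
// 'cmd_buf' and 'src_image' are hypothetical handles.
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = src_image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
//                          VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
//                          0, nullptr, 0, nullptr, 1, &barrier);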
7896
7897VkBool32 VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7898                               VkImageLayout destImageLayout) {
7899    VkBool32 skip_call = VK_FALSE;
7900
7901    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7902    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7903    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7904        uint32_t layer = i + subLayers.baseArrayLayer;
7905        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7906        IMAGE_CMD_BUF_LAYOUT_NODE node;
7907        if (!FindLayout(pCB, destImage, sub, node)) {
7908            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7909            continue;
7910        }
7911        if (node.layout != destImageLayout) {
7912            skip_call |=
7913                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7914                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image with dest layout %s "
7915                                                                        "when its current (tracked) layout is %s.",
7916                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7917        }
7918    }
7919    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7920        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7921            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7922            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7923                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7924                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7925        } else {
7926            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7927                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7928                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7929                                 string_VkImageLayout(destImageLayout));
7930        }
7931    }
7932    return skip_call;
7933}
7934
7935VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7936vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7937               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7938    VkBool32 skipCall = VK_FALSE;
7939    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7940    loader_platform_thread_lock_mutex(&globalLock);
7941#if MTMERGESOURCE
7942    VkDeviceMemory mem;
7943    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7944    // Validate that src & dst images have correct usage flags set
7945    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7946    if (cb_data != dev_data->commandBufferMap.end()) {
7947        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
7948        cb_data->second->validate_functions.push_back(function);
7949    }
7950    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7951    skipCall |=
7952        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7953    if (cb_data != dev_data->commandBufferMap.end()) {
7954        std::function<VkBool32()> function = [=]() {
7955            set_memory_valid(dev_data, mem, true, dstImage);
7956            return VK_FALSE;
7957        };
7958        cb_data->second->validate_functions.push_back(function);
7959    }
7960    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7961    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7962                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7963    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7964                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7965#endif
7966    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7967    if (pCB) {
7968        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
7969        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
7970        for (uint32_t i = 0; i < regionCount; ++i) {
7971            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7972            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7973        }
7974    }
7975    loader_platform_thread_unlock_mutex(&globalLock);
7976    if (VK_FALSE == skipCall)
7977        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7978                                                      regionCount, pRegions);
7979}
7980
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    // Validate that src & dst images have correct usage flags set
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                      regionCount, pRegions, filter);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
    // Validate that src buff & dst image have correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
                                                              pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
    // Validate that dst buff & src image have correct usage flags set
    skipCall |= validate_image_usage_flags(dev_data, commandBuffer, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
                                                              pRegions);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall =
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
    // Validate that dst buff has correct usage flags set
    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

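// vkCmdClearAttachments clears regions within the current subpass, so it must appear inside a
// render pass. Clearing an entire attachment before any draw is usually better expressed with
// VK_ATTACHMENT_LOAD_OP_CLEAR, which is what the performance warning below recommends. An
// illustrative (not compiled) app-side sketch of the preferred setup:
//
//     VkAttachmentDescription color = {};
//     color.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; // cleared when the render pass begins
//     // ...then supply the clear color via VkRenderPassBeginInfo::pClearValues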
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
                                                                 const VkClearRect *pRects) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        // Warn if this is issued prior to any Draw Cmd and clears the entire render area
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // TODO : commandBuffer should be srcObj
            // There are cases where an app legitimately needs to call vkCmdClearAttachments (generally when
            // reusing an attachment in the middle of a render pass). TODO: make this check specific enough to
            // skip those required uses; until then this is reported only as a performance warning.
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, 0, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                (uint64_t)(commandBuffer));
        }
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];

        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                VkBool32 found = VK_FALSE;
                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
                        found = VK_TRUE;
                        break;
                    }
                }
                if (VK_FALSE == found) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
                        attachment->colorAttachment, pCB->activeSubpass);
                }
            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                // No depth/stencil attachment will be used in the active subpass
                if (!pSD->pDepthStencilAttachment ||
                    (pSD->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {

                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
                        "in active subpass %d",
                        attachment->colorAttachment,
                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
                        pCB->activeSubpass);
                }
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

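// The clear commands below are pure writes, so their deferred lambdas mark the image's backing
// memory valid at submit time instead of requiring it to already contain valid data.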
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                            const VkImageSubresourceRange *pRanges) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
    VkDeviceMemory mem;
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, image);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
                                                                   pRanges);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    VkDeviceMemory mem;
    skipCall = get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function =
            [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
    skipCall |=
        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
    if (cb_data != dev_data->commandBufferMap.end()) {
        std::function<VkBool32()> function = [=]() {
            set_memory_valid(dev_data, mem, true, dstImage);
            return VK_FALSE;
        };
        cb_data->second->validate_functions.push_back(function);
    }
    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
                                                         regionCount, pRegions);
}

bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

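// Event stage masks cannot be fully validated at record time because an event may also be set
// or reset on another queue. vkCmdSetEvent/vkCmdResetEvent therefore bind setEventStageMask
// into eventUpdates; those callbacks run at submit time with the actual queue, updating that
// queue's eventToStageMap (a reset records a stage mask of 0).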
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
        pCB->events.push_back(event);
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
}

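// Applies the layout transitions described by the image memory barriers to this command
// buffer's per-subresource layout map. A subresource seen for the first time records both the
// old and new layout; a known subresource is checked against oldLayout (UNDEFINED matches
// anything) and then updated to newLayout.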
VkBool32 TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount, const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    VkBool32 skip = VK_FALSE;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where
        // possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
                    SetLayout(pCB, mem_barrier->image, sub,
                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
                    continue;
                }
                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                    // TODO: Set memory invalid which is in mem_tracker currently
                } else if (node.layout != mem_barrier->oldLayout) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
                                                                                    "when current layout is %s.",
                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
                }
                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1u << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set.
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
VkBool32 ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                          const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits, const char *type) {
    VkBool32 skip_call = VK_FALSE;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
                                                                  "%s when layout is %s, unless the app has previously added a "
                                                                  "barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
                                                                  "layout is %s, unless the app has previously added a barrier for "
                                                                  "this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

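// Worked example of the rule above: for VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL the required bit
// is VK_ACCESS_TRANSFER_WRITE_BIT and there are no optional bits, so:
//     accessMask == VK_ACCESS_TRANSFER_WRITE_BIT                             -> OK
//     accessMask == (VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_HOST_READ_BIT) -> warning (extra bit)
//     accessMask == VK_ACCESS_SHADER_READ_BIT                                -> error (required bit missing)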
VkBool32 ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                     const VkImageLayout &layout, const char *type) {
    VkBool32 skip_call = VK_FALSE;
    switch (layout) {
    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
        break;
    }
    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
        break;
    }
    case VK_IMAGE_LAYOUT_UNDEFINED: {
        if (accessMask != 0) {
            // TODO: Verify against Valid Use section spec
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
        break;
    }
    case VK_IMAGE_LAYOUT_GENERAL:
    default: { break; }
    }
    return skip_call;
}

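// Cross-checks the supplied barriers against current command buffer state:
//  - memory barriers recorded inside a render pass require the active subpass to declare a
//    self-dependency,
//  - image barriers must use queue family indices consistent with the image's sharing mode,
//    access masks appropriate for the old/new layouts, and in-range subresource ranges,
//  - buffer barriers are rejected inside render passes and checked for valid queue family
//    indices and an offset/size range that fits within the buffer.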
VkBool32 ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                          const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                          const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                          const VkImageMemoryBarrier *pImageMemBarriers) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                                  "with no self dependency specified.",
                                 funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = dev_data->imageMap.find(mem_barrier->image);
        if (image_data != dev_data->imageMap.end()) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->physDevProperties.queue_family_properties.size()))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                         " or dstQueueFamilyIndex %d is greater than the number of "
                                         "queueFamilies (" PRINTF_SIZE_T_SPECIFIER ") created for this device.",
                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
                                         dst_q_f_index, dev_data->physDevProperties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
            skip_call |=
                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                             "PREINITIALIZED.",
                            funcName);
            }
            auto image_data = dev_data->imageMap.find(mem_barrier->image);
            VkFormat format;
            uint32_t arrayLayers, mipLevels;
            bool imageFound = false;
            if (image_data != dev_data->imageMap.end()) {
                format = image_data->second.createInfo.format;
                arrayLayers = image_data->second.createInfo.arrayLayers;
                mipLevels = image_data->second.createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
                        format = swapchain_data->second->createInfo.imageFormat;
                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                if (vk_format_is_depth_and_stencil(format) &&
                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
                                                                 "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
                                                                 "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                funcName);
                }
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
                                                                 "baseArrayLayer (%d) and layerCount (%d) be less "
                                                                 "than or equal to the total number of layers (%d).",
                                funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
                                arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
                                                                 "(%d) and levelCount (%d) be less than or equal to "
                                                                 "the total number of levels (%d).",
                                funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
                                mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->physDevProperties.queue_family_properties.size())) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                 dev_data->physDevProperties.queue_family_properties.size());
        }

        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
        if (buffer_data != dev_data->bufferMap.end()) {
            // Only dereference the iterator after confirming the buffer was found
            uint64_t buffer_size =
                buffer_data->second.create_info ? reinterpret_cast<uint64_t &>(buffer_data->second.create_info->size) : 0;
            if (mem_barrier->offset >= buffer_size) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64
                                                             " which is not less than total size %" PRIu64 ".",
                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                            reinterpret_cast<const uint64_t &>(mem_barrier->offset), buffer_size);
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
                                     " whose sum is greater than total size %" PRIu64 ".",
                                     funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                                     reinterpret_cast<const uint64_t &>(mem_barrier->size), buffer_size);
            }
        }
    }
    return skip_call;
}

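// Deferred check bound by vkCmdWaitEvents: at submit time, OR together the most recent stage
// mask recorded for each waited event (preferring this queue's view and falling back to the
// global event state) and require the result to equal the recorded srcStageMask.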
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = dev_data->eventMap.find(event);
            if (global_event_data == dev_data->eventMap.end()) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->second.stageMask;
            }
        }
    }
    if (sourceStageMask != stageMask) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_EVENT, "DS",
                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x, which must be the bitwise OR of "
                    "the stageMask parameters used in calls to vkCmdSetEvent, or VK_PIPELINE_STAGE_HOST_BIT if the event was set "
                    "with vkSetEvent.",
                    sourceStageMask);
    }
    return skip_call;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        auto firstEventIndex = pCB->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            pCB->waitedEvents.push_back(pEvents[i]);
            pCB->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
        pCB->eventUpdates.push_back(eventUpdate);
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skipCall |=
            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
}

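// Query lifecycle tracking used by the next four entry points: BeginQuery inserts the
// (pool, slot) pair into activeQueries and startedQueries, EndQuery removes it from
// activeQueries and marks it available in queryToStateMap, ResetQueryPool clears that
// availability, and CopyQueryPoolResults flags copies from queries never made available.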
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
                        (uint64_t)(queryPool), slot);
        } else {
            pCB->activeQueries.erase(query);
        }
        pCB->queryToStateMap[query] = 1;
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            pCB->queryToStateMap[query] = 0;
        }
        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
        } else {
            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8885    }
8886    loader_platform_thread_unlock_mutex(&globalLock);
8887    if (VK_FALSE == skipCall)
8888        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8889}
8890
8891VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8892vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8893                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8894    VkBool32 skipCall = VK_FALSE;
8895    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8896    loader_platform_thread_lock_mutex(&globalLock);
8897    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8898#if MTMERGESOURCE
8899    VkDeviceMemory mem = VK_NULL_HANDLE; // defensive init in case no memory binding is found below
8900    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8901    skipCall |=
8902        get_mem_binding_from_object(dev_data, commandBuffer, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8903    if (cb_data != dev_data->commandBufferMap.end()) {
8904        std::function<VkBool32()> function = [=]() {
8905            set_memory_valid(dev_data, mem, true);
8906            return VK_FALSE;
8907        };
8908        cb_data->second->validate_functions.push_back(function);
8909    }
8910    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8911    // Validate that DST buffer has correct usage flags set
8912    skipCall |= validate_buffer_usage_flags(dev_data, commandBuffer, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8913                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8914#endif
8915    if (pCB) {
8916        for (uint32_t i = 0; i < queryCount; i++) {
8917            QueryObject query = {queryPool, firstQuery + i};
8918            if (!pCB->queryToStateMap[query]) {
8919                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8920                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8921                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8922                                    (uint64_t)(queryPool), firstQuery + i);
8923            }
8924        }
8925        if (pCB->state == CB_RECORDING) {
8926            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8927        } else {
8928            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8929        }
8930        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8931    }
8932    loader_platform_thread_unlock_mutex(&globalLock);
8933    if (VK_FALSE == skipCall)
8934        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8935                                                                 dstOffset, stride, flags);
8936}
8937
8938VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8939                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8940                                                              const void *pValues) {
8941    bool skipCall = false;
8942    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8943    loader_platform_thread_lock_mutex(&globalLock);
8944    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8945    if (pCB) {
8946        if (pCB->state == CB_RECORDING) {
8947            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8948        } else {
8949            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8950        }
8951    }
8952    if ((offset + size) > dev_data->physDevProperties.properties.limits.maxPushConstantsSize) {
8953        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
8954    }
8955    // TODO : Add warning if push constant update doesn't align with range
8956    loader_platform_thread_unlock_mutex(&globalLock);
8957    if (!skipCall)
8958        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8959}
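
// Illustrative sketch (not part of the layer; 'cmdBuffer' and 'pipelineLayout'
// are hypothetical handles): an update that satisfies the size check above,
// since maxPushConstantsSize is guaranteed to be at least 128 bytes:
//     float mvp[16]; // 64 bytes at offset 0, inside the guaranteed minimum
//     vkCmdPushConstants(cmdBuffer, pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(mvp), mvp);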
8960
8961VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8962vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8963    VkBool32 skipCall = VK_FALSE;
8964    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8965    loader_platform_thread_lock_mutex(&globalLock);
8966    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8967    if (pCB) {
8968        QueryObject query = {queryPool, slot};
8969        pCB->queryToStateMap[query] = 1;
8970        if (pCB->state == CB_RECORDING) {
8971            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8972        } else {
8973            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8974        }
8975    }
8976    loader_platform_thread_unlock_mutex(&globalLock);
8977    if (VK_FALSE == skipCall)
8978        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8979}
8980
8981VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8982                                                                   const VkAllocationCallbacks *pAllocator,
8983                                                                   VkFramebuffer *pFramebuffer) {
8984    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8985    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8986    if (VK_SUCCESS == result) {
8987        // Shadow create info and store in map
8988        loader_platform_thread_lock_mutex(&globalLock);
8989
8990        auto & fbNode = dev_data->frameBufferMap[*pFramebuffer];
8991        fbNode.createInfo = *pCreateInfo;
8992        if (pCreateInfo->pAttachments) {
8993            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
8994            memcpy(attachments,
8995                   pCreateInfo->pAttachments,
8996                   pCreateInfo->attachmentCount * sizeof(VkImageView));
8997            fbNode.createInfo.pAttachments = attachments;
8998        }
8999        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9000            VkImageView view = pCreateInfo->pAttachments[i];
9001            auto view_data = dev_data->imageViewMap.find(view);
9002            if (view_data == dev_data->imageViewMap.end()) {
9003                continue;
9004            }
9005            MT_FB_ATTACHMENT_INFO fb_info;
9006            get_mem_binding_from_object(dev_data, device, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9007                                        &fb_info.mem);
9008            fb_info.image = view_data->second.image;
9009            fbNode.attachments.push_back(fb_info);
9010        }
9011
9012        loader_platform_thread_unlock_mutex(&globalLock);
9013    }
9014    return result;
9015}
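
// Illustrative sketch (not part of the layer; handles are hypothetical and the
// remaining VkFramebufferCreateInfo fields are omitted): the deep copy of
// pAttachments above matters because an application may release its array as
// soon as vkCreateFramebuffer() returns:
//     std::vector<VkImageView> views = {view0, view1};
//     VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO};
//     fbci.attachmentCount = static_cast<uint32_t>(views.size());
//     fbci.pAttachments = views.data(); // only guaranteed valid during the call
//     vkCreateFramebuffer(device, &fbci, nullptr, &framebuffer);
//     views.clear(); // fine: this layer shadowed its own copy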
9016
9017VkBool32 FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9018                        std::unordered_set<uint32_t> &processed_nodes) {
9019    // If this node has already been checked, no dependency path was found through it, so return false.
9020    if (processed_nodes.count(index))
9021        return VK_FALSE;
9022    processed_nodes.insert(index);
9023    const DAGNode &node = subpass_to_node[index];
9024    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
9025    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9026        for (auto elem : node.prev) {
9027            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9028                return VK_TRUE;
9029        }
9030    } else {
9031        return VK_TRUE;
9032    }
9033    return VK_FALSE;
9034}
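
// Illustrative sketch (not part of the layer): FindDependency() follows prev
// edges transitively, so an indirect path still counts as a dependency:
//     std::vector<DAGNode> nodes(3);
//     nodes[1].prev.push_back(0); // subpass 1 depends on subpass 0
//     nodes[2].prev.push_back(1); // subpass 2 depends on subpass 1
//     std::unordered_set<uint32_t> seen;
//     assert(VK_TRUE == FindDependency(2, 0, nodes, seen)); // 2 reaches 0 via 1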
9035
9036VkBool32 CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9037                               const std::vector<DAGNode> &subpass_to_node, VkBool32 &skip_call) {
9038    VkBool32 result = VK_TRUE;
9039    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9040    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9041        if (subpass == dependent_subpasses[k])
9042            continue;
9043        const DAGNode &node = subpass_to_node[subpass];
9044        // Check for a specified dependency between the two nodes. If one exists we are done.
9045        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9046        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9047        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9048            // If no explicit dependency exists, an implicit one might. If so, warn; if not, report an error.
9049            std::unordered_set<uint32_t> processed_nodes;
9050            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9051                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
9052                // TODO: Verify against Valid Use section of spec
9053                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9054                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9055                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
9056                                     subpass, dependent_subpasses[k]);
9057            } else {
9058                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9059                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9060                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9061                                     dependent_subpasses[k]);
9062                result = VK_FALSE;
9063            }
9064        }
9065    }
9066    return result;
9067}
9068
9069VkBool32 CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9070                        const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, VkBool32 &skip_call) {
9071    const DAGNode &node = subpass_to_node[index];
9072    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9073    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9074    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9075        if (attachment == subpass.pColorAttachments[j].attachment)
9076            return VK_TRUE;
9077    }
9078    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9079        if (attachment == subpass.pDepthStencilAttachment->attachment)
9080            return VK_TRUE;
9081    }
9082    VkBool32 result = VK_FALSE;
9083    // Loop through previous nodes and see if any of them write to the attachment.
9084    for (auto elem : node.prev) {
9085        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9086    }
9087    // If the attachment was written to by a previous node then this node needs to preserve it.
9088    if (result && depth > 0) {
9090        VkBool32 has_preserved = VK_FALSE;
9091        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9092            if (subpass.pPreserveAttachments[j] == attachment) {
9093                has_preserved = VK_TRUE;
9094                break;
9095            }
9096        }
9097        if (has_preserved == VK_FALSE) {
9098            skip_call |=
9099                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9100                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9101                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9102        }
9103    }
9104    return result;
9105}
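
// Illustrative sketch (not part of the layer): the preserve list that avoids
// the error above. If subpass 0 writes attachment 0 and subpass 2 reads it,
// the intervening subpass 1 must name it in pPreserveAttachments:
//     const uint32_t preserved[] = {0};
//     VkSubpassDescription middle = {};
//     middle.preserveAttachmentCount = 1;
//     middle.pPreserveAttachments = preserved;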
9106
9107// Half-open ranges [offset, offset + size) overlap iff each begins before the
9108// other ends; this covers partial, identical, and fully nested overlaps alike.
9109template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9110    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
}
9111
9112bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9113    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9114            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9115}
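
// Illustrative sketch (not part of the layer): overlap semantics of the two
// helpers above on half-open ranges:
//     assert(isRangeOverlapping<uint32_t>(0, 4, 2, 4));  // [0,4) and [2,6) share [2,4)
//     assert(!isRangeOverlapping<uint32_t>(0, 2, 2, 2)); // [0,2) and [2,4) only touch
//     VkImageSubresourceRange a = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; // mips 0-1, layer 0
//     VkImageSubresourceRange b = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 2, 0, 1}; // mips 1-2, layer 0
//     assert(isRegionOverlapping(a, b)); // both cover mip 1 of layer 0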
9116
9117VkBool32 ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
9118                              const std::vector<DAGNode> &subpass_to_node) {
9119    VkBool32 skip_call = VK_FALSE;
9120    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9121    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
9122    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9123    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9124    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9125    // Find overlapping attachments
9126    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9127        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9128            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9129            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9130            if (viewi == viewj) {
9131                overlapping_attachments[i].push_back(j);
9132                overlapping_attachments[j].push_back(i);
9133                continue;
9134            }
9135            auto view_data_i = my_data->imageViewMap.find(viewi);
9136            auto view_data_j = my_data->imageViewMap.find(viewj);
9137            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
9138                continue;
9139            }
9140            if (view_data_i->second.image == view_data_j->second.image &&
9141                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
9142                overlapping_attachments[i].push_back(j);
9143                overlapping_attachments[j].push_back(i);
9144                continue;
9145            }
9146            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
9147            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
9148            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
9149                continue;
9150            }
9151            if (image_data_i->second.mem == image_data_j->second.mem &&
9152                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
9153                                   image_data_j->second.memSize)) {
9154                overlapping_attachments[i].push_back(j);
9155                overlapping_attachments[j].push_back(i);
9156            }
9157        }
9158    }
9159    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9160        uint32_t attachment = i;
9161        for (auto other_attachment : overlapping_attachments[i]) {
9162            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9163                skip_call |=
9164                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9165                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9166                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9167                            attachment, other_attachment);
9168            }
9169            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9170                skip_call |=
9171                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9172                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9173                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9174                            other_attachment, attachment);
9175            }
9176        }
9177    }
9178    // For each attachment, find the subpasses that use it.
9179    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9180        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9181        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9182            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9183            input_attachment_to_subpass[attachment].push_back(i);
9184            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9185                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9186            }
9187        }
9188        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9189            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9190            output_attachment_to_subpass[attachment].push_back(i);
9191            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9192                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9193            }
9194        }
9195        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9196            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9197            output_attachment_to_subpass[attachment].push_back(i);
9198            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9199                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9200            }
9201        }
9202    }
9203    // Wherever a dependency is needed, make sure one exists
9204    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9205        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9206        // If the attachment is an input then all subpasses that output must have a dependency relationship
9207        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9208            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
9209            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9210        }
9211        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9212        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9213            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
9214            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9215            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9216        }
9217        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9218            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9219            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9220            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9221        }
9222    }
9223    // Walk the implicit dependencies: wherever a subpass reads an attachment, make sure every subpass between the write and
9224    // the read preserves it.
9225    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9226        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9227        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9228            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9229        }
9230    }
9231    return skip_call;
9232}
9233
9234VkBool32 ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9235    VkBool32 skip = VK_FALSE;
9236
9237    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9238        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9239        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9240            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
9241                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9242                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9243                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9244                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9245                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9246                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9247                } else {
9248                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9249                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9250                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9251                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
9252                }
9253            }
9254        }
9255        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9256            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
9257                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
9258                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9259                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9260                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9261                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9262                } else {
9263                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9264                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9265                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9266                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
9267                }
9268            }
9269        }
9270        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9271            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
9272                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
9273                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
9274                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9275                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9276                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
9277                } else {
9278                    skip |=
9279                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9280                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9281                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
9282                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9283                }
9284            }
9285        }
9286    }
9287    return skip;
9288}
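
// Illustrative sketch (not part of the layer): per-use layouts that pass
// ValidateLayouts() without warnings:
//     VkAttachmentReference color = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkAttachmentReference input = {1, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
//     VkAttachmentReference depth = {2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
// VK_IMAGE_LAYOUT_GENERAL is also accepted for each use, but only with a
// performance warning from the checks above.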
9289
9290VkBool32 CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9291                       std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9292    VkBool32 skip_call = VK_FALSE;
9293    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9294        DAGNode &subpass_node = subpass_to_node[i];
9295        subpass_node.pass = i;
9296    }
9297    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9298        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9299        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9300            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9301            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9302                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9303                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9304        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9305            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9306                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9307        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9308            has_self_dependency[dependency.srcSubpass] = true;
9309        }
9310        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9311            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9312        }
9313        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9314            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9315        }
9316    }
9317    return skip_call;
9318}
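
// Illustrative sketch (not part of the layer): how CreatePassDAG() turns one
// VkSubpassDependency into a pair of DAG edges:
//     VkSubpassDependency dep = {};
//     dep.srcSubpass = 0;
//     dep.dstSubpass = 1;
//     // Afterwards subpass_to_node[1].prev contains 0 and
//     // subpass_to_node[0].next contains 1; VK_SUBPASS_EXTERNAL endpoints are
//     // deliberately kept out of the graph.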
9319
9320
9321VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9322                                                                    const VkAllocationCallbacks *pAllocator,
9323                                                                    VkShaderModule *pShaderModule) {
9324    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9325    VkBool32 skip_call = VK_FALSE;
9326    if (!shader_is_spirv(pCreateInfo)) {
9327        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
9328                             /* dev */ 0, __LINE__, SHADER_CHECKER_NON_SPIRV_SHADER, "SC", "Shader is not SPIR-V");
9329    }
9330
9331    if (VK_FALSE != skip_call)
9332        return VK_ERROR_VALIDATION_FAILED_EXT;
9333
9334    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9335
9336    if (res == VK_SUCCESS) {
9337        loader_platform_thread_lock_mutex(&globalLock);
9338        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9339        loader_platform_thread_unlock_mutex(&globalLock);
9340    }
9341    return res;
9342}
9343
9344VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9345                                                                  const VkAllocationCallbacks *pAllocator,
9346                                                                  VkRenderPass *pRenderPass) {
9347    VkBool32 skip_call = VK_FALSE;
9348    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9349    loader_platform_thread_lock_mutex(&globalLock);
9350    // Create DAG
9351    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9352    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9353    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9354    // Validate
9355    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9356    if (VK_FALSE != skip_call) {
9357        loader_platform_thread_unlock_mutex(&globalLock);
9358        return VK_ERROR_VALIDATION_FAILED_EXT;
9359    }
9360    loader_platform_thread_unlock_mutex(&globalLock);
9361    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9362    if (VK_SUCCESS == result) {
9363        loader_platform_thread_lock_mutex(&globalLock);
9364        // TODOSC : Merge in tracking of renderpass from shader_checker
9365        // Shadow create info and store in map
9366        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9367        if (pCreateInfo->pAttachments) {
9368            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9369            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9370                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9371        }
9372        if (pCreateInfo->pSubpasses) {
9373            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9374            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9375
9376            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9377                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9378                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9379                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9380                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9381                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9382
9383                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9384                subpass->pInputAttachments = attachments;
9385                attachments += subpass->inputAttachmentCount;
9386
9387                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9388                subpass->pColorAttachments = attachments;
9389                attachments += subpass->colorAttachmentCount;
9390
9391                if (subpass->pResolveAttachments) {
9392                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9393                    subpass->pResolveAttachments = attachments;
9394                    attachments += subpass->colorAttachmentCount;
9395                }
9396
9397                if (subpass->pDepthStencilAttachment) {
9398                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9399                    subpass->pDepthStencilAttachment = attachments;
9400                    attachments += 1;
9401                }
9402
                // pPreserveAttachments is a packed uint32_t array, not an array of
                // VkAttachmentReference, so copy sizeof(uint32_t) per element to
                // avoid reading past the end of the source array.
9403                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
9404                subpass->pPreserveAttachments = &attachments->attachment;
9405            }
9406        }
9407        if (pCreateInfo->pDependencies) {
9408            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9409            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9410                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9411        }
9412        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9413        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9414        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9415#if MTMERGESOURCE
9416        // MTMTODO : Merge with code from above to eliminate duplication
9417        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9418            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9419            MT_PASS_ATTACHMENT_INFO pass_info;
9420            pass_info.load_op = desc.loadOp;
9421            pass_info.store_op = desc.storeOp;
9422            pass_info.attachment = i;
9423            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9424        }
9425        // TODO: Maybe fill list and then copy instead of locking
9426        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9427        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9428            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9429        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9430            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9431            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9432                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9433                if (attachment >= pCreateInfo->attachmentCount) {
9434                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9435                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9436                                         "Color attachment %d must be less than the total number of attachments %d.",
9437                                         attachment, pCreateInfo->attachmentCount);
9438                    continue;
9439                }
9440                if (attachment_first_read.count(attachment))
9441                    continue;
9442                attachment_first_read.insert(std::make_pair(attachment, false));
9443                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9444            }
9445            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9446                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9447                if (attachment >= pCreateInfo->attachmentCount) {
9448                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9449                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9450                                         "Depth stencil attachment %d must be less than the total number of attachments %d.",
9451                                         attachment, pCreateInfo->attachmentCount);
9452                    continue;
9453                }
9454                if (attachment_first_read.count(attachment))
9455                    continue;
9456                attachment_first_read.insert(std::make_pair(attachment, false));
9457                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9458            }
9459            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9460                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9461                if (attachment >= pCreateInfo->attachmentCount) {
9462                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9463                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9464                                         "Input attachment %d must be less than the total number of attachments %d.",
9465                                         attachment, pCreateInfo->attachmentCount);
9466                    continue;
9467                }
9468                if (attachment_first_read.count(attachment))
9469                    continue;
9470                attachment_first_read.insert(std::make_pair(attachment, true));
9471                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9472            }
9473        }
9474#endif
9475        loader_platform_thread_unlock_mutex(&globalLock);
9476    }
9477    return result;
9478}
9479// Free the renderpass shadow
9480static void deleteRenderPasses(layer_data *my_data) {
9481    if (my_data->renderPassMap.empty())
9482        return;
9483    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9484        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9485        delete[] pRenderPassInfo->pAttachments;
9486        if (pRenderPassInfo->pSubpasses) {
9487            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9488                // Attachments are all allocated in a block, so just need to
9489                //  find the first non-null one to delete
9490                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9491                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9492                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9493                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9494                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9495                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9496                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9497                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9498                }
9499            }
9500            delete[] pRenderPassInfo->pSubpasses;
9501        }
9502        delete[] pRenderPassInfo->pDependencies;
9503        delete pRenderPassInfo;
9504        delete (*ii).second;
9505    }
9506    my_data->renderPassMap.clear();
9507}
9508
9509VkBool32 VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9510    VkBool32 skip_call = VK_FALSE;
9511    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9512    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9513    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9514    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9515    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9516        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9517                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9518                                                                 "with a different number of attachments.");
9519    }
9520    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9521        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9522        auto image_data = dev_data->imageViewMap.find(image_view);
9523        assert(image_data != dev_data->imageViewMap.end());
9524        const VkImage &image = image_data->second.image;
9525        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9526        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9527                                             pRenderPassInfo->pAttachments[i].initialLayout};
9528        // TODO: Do not iterate over every possibility - consolidate where possible
9529        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9530            uint32_t level = subRange.baseMipLevel + j;
9531            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9532                uint32_t layer = subRange.baseArrayLayer + k;
9533                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9534                IMAGE_CMD_BUF_LAYOUT_NODE node;
9535                if (!FindLayout(pCB, image, sub, node)) {
9536                    SetLayout(pCB, image, sub, newNode);
9537                    continue;
9538                }
9539                if (newNode.layout != node.layout) {
9540                    skip_call |=
9541                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9542                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9543                                                                    "where the "
9544                                                                    "initial layout is %s and the layout of the attachment at the "
9545                                                                    "start of the render pass is %s. The layouts must match.",
9546                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9547                }
9548            }
9549        }
9550    }
9551    return skip_call;
9552}
9553
9554void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, const int subpass_index) {
9555    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9556    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9557    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9558    if (render_pass_data == dev_data->renderPassMap.end()) {
9559        return;
9560    }
9561    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9562    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9563    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9564        return;
9565    }
9566    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9567    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9568    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9569        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9570        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9571    }
9572    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9573        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9574        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9575    }
9576    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9577        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9578        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9579    }
9580}
9581
9582VkBool32 validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9583    VkBool32 skip_call = VK_FALSE;
9584    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9585        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9586                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9587                             cmd_name.c_str());
9588    }
9589    return skip_call;
9590}
9591
9592void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9593    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9594    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9595    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9596    if (render_pass_data == dev_data->renderPassMap.end()) {
9597        return;
9598    }
9599    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9600    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9601    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9602        return;
9603    }
9604    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9605    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9606        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9607        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9608    }
9609}
9610
9611bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9612    bool skip_call = false;
9613    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9614    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9615        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9616        pRenderPassBegin->renderArea.offset.y < 0 ||
9617        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9618        skip_call |= static_cast<bool>(log_msg(
9619            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9620            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9621            "Cannot execute a render pass with a renderArea that is not within the bounds of the "
9622            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9623            "height %d.",
9624            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9625            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9626    }
9627    return skip_call;
9628}
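
// Illustrative sketch (not part of the layer; remaining VkRenderPassBeginInfo
// fields omitted): a renderArea that passes VerifyRenderAreaBounds() against a
// hypothetical 800x600 framebuffer:
//     VkRenderPassBeginInfo rpBegin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
//     rpBegin.renderArea.offset = {0, 0};
//     rpBegin.renderArea.extent = {800, 600}; // offset + extent stays within the framebuffer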
9629
9630VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9631vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9632    VkBool32 skipCall = VK_FALSE;
9633    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9634    loader_platform_thread_lock_mutex(&globalLock);
9635    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9636    if (pCB) {
9637        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
9638#if MTMERGESOURCE
9639            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9640            if (pass_data != dev_data->renderPassMap.end()) {
9641                RENDER_PASS_NODE* pRPNode = pass_data->second;
9642                pRPNode->fb = pRenderPassBegin->framebuffer;
9643                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9644                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9645                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9646                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9647                        if (cb_data != dev_data->commandBufferMap.end()) {
9648                            std::function<VkBool32()> function = [=]() {
9649                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9650                                return VK_FALSE;
9651                            };
9652                            cb_data->second->validate_functions.push_back(function);
9653                        }
9654                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9655                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9656                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9657                            skipCall |=
9658                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9659                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9660                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9661                                        pRPNode->attachments[i].attachment, attachment_layout);
9662                        }
9663                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9664                        if (cb_data != dev_data->commandBufferMap.end()) {
9665                            std::function<VkBool32()> function = [=]() {
9666                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9667                                return VK_FALSE;
9668                            };
9669                            cb_data->second->validate_functions.push_back(function);
9670                        }
9671                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9672                        if (cb_data != dev_data->commandBufferMap.end()) {
9673                            std::function<VkBool32()> function = [=]() {
9674                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9675                            };
9676                            cb_data->second->validate_functions.push_back(function);
9677                        }
9678                    }
9679                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9680                        if (cb_data != dev_data->commandBufferMap.end()) {
9681                            std::function<VkBool32()> function = [=]() {
9682                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9683                            };
9684                            cb_data->second->validate_functions.push_back(function);
9685                        }
9686                    }
9687                }
9688            }
9689#endif
9690            skipCall |= static_cast<VkBool32>(VerifyRenderAreaBounds(dev_data, pRenderPassBegin));
9691            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9692            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9693            if (render_pass_data != dev_data->renderPassMap.end()) {
9694                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9695            }
9696            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9697            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9698            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9699            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9700            // This is a shallow copy as that is all that is needed for now
9701            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9702            pCB->activeSubpass = 0;
9703            pCB->activeSubpassContents = contents;
9704            pCB->framebuffer = pRenderPassBegin->framebuffer;
9705            // Connect this framebuffer to this cmdBuffer
9706            dev_data->frameBufferMap[pCB->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9707        } else {
9708            skipCall |=
9709                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9710                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9711        }
9712    }
9713    loader_platform_thread_unlock_mutex(&globalLock);
9714    if (VK_FALSE == skipCall) {
9715        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9716        loader_platform_thread_lock_mutex(&globalLock);
9717        // This is a shallow copy as that is all that is needed for now
9718        dev_data->renderPassBeginInfo = *pRenderPassBegin;
9719        dev_data->currentSubpass = 0;
9720        loader_platform_thread_unlock_mutex(&globalLock);
9721    }
9722}
9723
9724VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9725    VkBool32 skipCall = VK_FALSE;
9726    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9727    loader_platform_thread_lock_mutex(&globalLock);
9728    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9729    TransitionSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo, ++dev_data->currentSubpass);
9730    if (pCB) {
9731        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9732        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9733        pCB->activeSubpass++;
9734        pCB->activeSubpassContents = contents;
9735        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9736        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9737            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9738                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9739        }
9740        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9741    }
9742    loader_platform_thread_unlock_mutex(&globalLock);
9743    if (VK_FALSE == skipCall)
9744        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9745}
9746
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
        if (pass_data != dev_data->renderPassMap.end()) {
            RENDER_PASS_NODE *pRPNode = pass_data->second;
            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
                    // STORE_OP_STORE leaves the attachment contents defined, so mark the memory valid
                    std::function<VkBool32()> function = [=]() {
                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                        return VK_FALSE;
                    };
                    cb_data->second->validate_functions.push_back(function);
                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    // DONT_CARE leaves the contents undefined, so mark the memory invalid
                    std::function<VkBool32()> function = [=]() {
                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                        return VK_FALSE;
                    };
                    cb_data->second->validate_functions.push_back(function);
                }
            }
        }
    }
#endif
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    TransitionFinalSubpassLayouts(commandBuffer, &dev_data->renderPassBeginInfo);
    if (pCB) {
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
        pCB->activeRenderPass = 0;
        pCB->activeSubpass = 0;
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}

bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
                                 VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach, const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                   " that is not compatible with the current render pass %" PRIx64 ". "
                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
                   msg);
}

// Two attachment references are compatible if both are VK_ATTACHMENT_UNUSED, or if the
// attachments they point to have matching formats and sample counts (and, when the render
// passes have multiple subpasses, matching flags).
bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                     uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
                                     uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The second is unused while the first is not.");
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different formats.");
    }
    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different samples.");
    }
    if (is_multi &&
        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different flags.");
    }
    return skip_call;
}

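// For reference, a sketch of two VkAttachmentDescription entries that the check above
// treats as compatible (illustrative values only; load/store ops and layouts do not
// participate in compatibility):
//
//     VkAttachmentDescription a = {};
//     a.format = VK_FORMAT_B8G8R8A8_UNORM;
//     a.samples = VK_SAMPLE_COUNT_1_BIT;
//     a.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;   // may differ from b.loadOp
//     VkAttachmentDescription b = a;
//     b.loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;    // still compatible: format/samples match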
bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                  VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass, bool is_multi) {
    bool skip_call = false;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}

bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                     VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
                             " that is not compatible with the current render pass %" PRIx64 ". "
                             "They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
        skip_call |=
            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
    }
    return skip_call;
}

bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                         VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->framebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
        if (fb_data == dev_data->frameBufferMap.end()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                                                                          "which has invalid framebuffer %" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}

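// Application-side sketch of the inheritance info validated above (names hypothetical).
// A secondary command buffer may name the framebuffer it will execute within; when it
// does, that framebuffer must match the one from the primary's vkCmdBeginRenderPass():
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = renderPass;     // must be compatible with the active pass
//     inherit.subpass = 0;
//     inherit.framebuffer = framebuffer;   // optional; VK_NULL_HANDLE skips the check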
bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skipCall = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics is being queried so the command "
                        "buffer must have all bits set on the queryPool.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
                        "which has invalid active query pool %" PRIx64 " of type %d but a query of that type has been started on "
                        "secondary Cmd Buffer %p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }
    return skipCall;
}

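// A sketch of the rule enforced by the pipeline-statistics check above (names
// hypothetical): the secondary's inherited pipelineStatistics may only contain bits
// that the active query pool was created with, e.g.
//
//     VkQueryPoolCreateInfo poolInfo = {};
//     poolInfo.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
//     poolInfo.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT |
//                                   VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT; // subset: OK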
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    VkBool32 skipCall = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // A secondary CB executed within a render pass must have the CONTINUE bit set
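                // Application-side sketch of a correctly recorded secondary for this
                // case (names hypothetical):
                //     VkCommandBufferBeginInfo beginInfo = {};
                //     beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
                //     beginInfo.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
                //     beginInfo.pInheritanceInfo = &inherit; // renderPass compatible w/ the active one
                //     vkBeginCommandBuffer(secondaryCmd, &beginInfo);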
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
                } else {
                    // The continue bit is set, so ensure the secondary's render pass is compatible with the active one
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
                }
                // If the secondary CB's inherited framebuffer is non-NULL, it must match the framebuffer from the
                // vkCmdBeginRenderPass() this CB will execute within, and that framebuffer must have been created
                // with a render pass compatible with the inherited one
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution from the moment they are recorded into a primary
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn: a secondary without SIMULTANEOUS_USE_BIT forces the primary to be treated as if it lacks the bit too
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                                          "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->physDevProperties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
                            "flight when inherited queries are not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}

VkBool32 ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
    VkBool32 skip_call = VK_FALSE;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto mem_data = dev_data->memObjMap.find(mem);
    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
        std::vector<VkImageLayout> layouts;
        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
            for (auto layout : layouts) {
                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
                                                                                         "GENERAL or PREINITIALIZED are supported.",
                                         string_VkImageLayout(layout));
                }
            }
        }
    }
    return skip_call;
}

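// Application-side sketch of an image that passes the layout check above (names
// hypothetical): a host-mappable image should use linear tiling and start in
// VK_IMAGE_LAYOUT_PREINITIALIZED so its memory can be written through vkMapMemory():
//
//     VkImageCreateInfo info = {};
//     info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
//     info.tiling = VK_IMAGE_TILING_LINEAR;
//     info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; // GENERAL is also accepted
//     info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;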
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkBool32 skip_call = VK_FALSE;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    loader_platform_thread_lock_mutex(&globalLock);
#if MTMERGESOURCE
    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
    if (pMemObj) {
        pMemObj->valid = true;
        if ((memProps.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call =
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
        }
    }
    skip_call |= validateMemRange(dev_data, mem, offset, size);
    storeMemRanges(dev_data, mem, offset, size);
#endif
    skip_call |= ValidateMapImageLayouts(device, mem);
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call) {
        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
#if MTMERGESOURCE
        loader_platform_thread_lock_mutex(&globalLock);
        initializeAndTrackMemory(dev_data, mem, size, ppData);
        loader_platform_thread_unlock_mutex(&globalLock);
#endif
    }
    return result;
}

#if MTMERGESOURCE
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkBool32 skipCall = VK_FALSE;

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= deleteMemRanges(my_data, mem);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        my_data->device_dispatch_table->UnmapMemory(device, mem);
    }
}

VkBool32 validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
                                const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
                    "(" PRINTF_SIZE_T_SPECIFIER ").",
                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
            }
            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
                 (pMemRanges[i].offset + pMemRanges[i].size))) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
                                                                 ") exceeds the Memory Object's upper-bound "
                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
            }
        }
    }
    return skipCall;
}

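// A note on the layout assumed below (a sketch of what initializeAndTrackMemory() is
// presumed to allocate for non-coherent tracking; not spec-mandated):
//
//     [0, size/2)                leading guard, filled with NoncoherentMemoryFillValue
//     [size/2, size/2 + size)    shadow copy of the application's mapped bytes
//     [size + size/2, 2 * size)  trailing guard, filled with NoncoherentMemoryFillValue
//
// Any guard byte that no longer equals the fill value means the application wrote
// outside its mapped range, which is reported as a memory overflow.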
VkBool32 validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
                                                  const VkMappedMemoryRange *pMemRanges) {
    VkBool32 skipCall = VK_FALSE;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.pData) {
                VkDeviceSize size = mem_element->second.memRange.size;
                VkDeviceSize half_size = (size / 2);
                char *data = static_cast<char *>(mem_element->second.pData);
                // Check the leading guard region for writes outside the mapped range
                for (VkDeviceSize j = 0; j < half_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                // Check the trailing guard region as well
                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
            }
        }
    }
    return skipCall;
}

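// Application-side sketch of a flush that satisfies validateMemoryIsMapped() above
// (names hypothetical): the flushed range must lie within the currently mapped range.
//
//     void *ptr = NULL;
//     vkMapMemory(device, memory, 0, 4096, 0, &ptr);
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = memory;
//     range.offset = 0;   // must be >= the mapped offset
//     range.size = 4096;  // offset + size must not exceed the mapped upper bound
//     vkFlushMappedMemoryRanges(device, 1, &range);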
VK_LAYER_EXPORT VkResult VKAPI_CALL
vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

VK_LAYER_EXPORT VkResult VKAPI_CALL
vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    loader_platform_thread_lock_mutex(&globalLock);
    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    loader_platform_thread_unlock_mutex(&globalLock);
    if (VK_FALSE == skipCall) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}
#endif

VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skipCall = VK_FALSE;
#if MTMERGESOURCE
    loader_platform_thread_lock_mutex(&globalLock);
    // Track objects tied to memory
    uint64_t image_handle = (uint64_t)(image);
    skipCall =
        set_mem_binding(dev_data, device, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
    add_object_binding_info(dev_data, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
    {
        VkMemoryRequirements memRequirements;
        // Query through the dispatch table rather than the loader export so we do not re-enter the layer while holding globalLock
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        skipCall |= validate_buffer_image_aliasing(dev_data, image_handle, mem, memoryOffset, memRequirements,
                                                   dev_data->memObjMap[mem].imageRanges, dev_data->memObjMap[mem].bufferRanges,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    }
    print_mem_list(dev_data, device);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    if (VK_FALSE == skipCall) {
        result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
        VkMemoryRequirements memRequirements;
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->memObjMap[mem].image = image;
        dev_data->imageMap[image].mem = mem;
        dev_data->imageMap[image].memOffset = memoryOffset;
        dev_data->imageMap[image].memSize = memRequirements.size;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

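// Application-side sketch of the bind pattern validated above (names hypothetical):
//
//     VkMemoryRequirements reqs;
//     vkGetImageMemoryRequirements(device, image, &reqs);
//     // memoryOffset must be a multiple of reqs.alignment, and the bound range
//     // [memoryOffset, memoryOffset + reqs.size) should not alias a buffer bound
//     // to the same VkDeviceMemory (the aliasing check above flags that)
//     vkBindImageMemory(device, image, memory, 0);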
VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    loader_platform_thread_lock_mutex(&globalLock);
    dev_data->eventMap[event].needsSignaled = false;
    dev_data->eventMap[event].stageMask = VK_PIPELINE_STAGE_HOST_BIT;
    loader_platform_thread_unlock_mutex(&globalLock);
    VkResult result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    VkBool32 skip_call = VK_FALSE;
#if MTMERGESOURCE
    // MTMTODO : Merge this code with the checks below
    loader_platform_thread_lock_mutex(&globalLock);

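    // The MEMTRACK semaphore checks below follow a simple state machine (as this layer
    // tracks it): UNSET -> SIGNALLED when a semaphore is named in pSignalSemaphores,
    // SIGNALLED -> WAIT when it is named in pWaitSemaphores, and WAIT -> UNSET once the
    // bind completes. Waiting on a semaphore that is not SIGNALLED, or signaling one
    // that is not UNSET, is reported as an error.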
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[i];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo->bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        for (uint32_t j = 0; j < bindInfo->imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo->pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, queue, bindInfo->pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo->pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = VK_TRUE;
            }
        }
        // Validate semaphore state
        for (uint32_t j = 0; j < bindInfo->waitSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_SIGNALLED) {
                    skip_call =
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must be in signaled state before passing to pWaitSemaphores");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_WAIT;
            }
        }
        for (uint32_t j = 0; j < bindInfo->signalSemaphoreCount; j++) {
            VkSemaphore sem = bindInfo->pSignalSemaphores[j];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[sem].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
                    skip_call =
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                (uint64_t)sem, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
                                "vkQueueBindSparse: Semaphore must not be currently signaled or in a wait state");
                }
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
            }
        }
    }

    print_mem_list(dev_data, queue);
    loader_platform_thread_unlock_mutex(&globalLock);
#endif
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[bindInfo.pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(bindInfo.pWaitSemaphores[i]));
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            dev_data->semaphoreMap[bindInfo.pSignalSemaphores[i]].signaled = 1;
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);

    if (VK_FALSE == skip_call)
        result = dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
#if MTMERGESOURCE
    // Update semaphore state now that the bind has been submitted, returning waited semaphores to UNSET
    loader_platform_thread_lock_mutex(&globalLock);
    for (uint32_t bind_info_idx = 0; bind_info_idx < bindInfoCount; bind_info_idx++) {
        const VkBindSparseInfo *bindInfo = &pBindInfo[bind_info_idx];
        for (uint32_t i = 0; i < bindInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = bindInfo->pWaitSemaphores[i];

            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
    }
    loader_platform_thread_unlock_mutex(&globalLock);
#endif

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        SEMAPHORE_NODE *sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaled = 0;
        sNode->queue = VK_NULL_HANDLE;
        sNode->in_use.store(0);
        sNode->state = MEMTRACK_SEMAPHORE_STATE_UNSET;
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].in_use.store(0);
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                                    const VkAllocationCallbacks *pAllocator,
                                                                    VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
        loader_platform_thread_lock_mutex(&globalLock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    return result;
}

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    loader_platform_thread_lock_mutex(&globalLock);
    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
        if (swapchain_data->second->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->second->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
#if MTMERGESOURCE
                skipCall = clear_object_binding(dev_data, device, (uint64_t)swapchain_image,
                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageBindingMap.erase((uint64_t)swapchain_image);
#endif
            }
        }
        delete swapchain_data->second;
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    loader_platform_thread_unlock_mutex(&globalLock);
    if (!skipCall)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        loader_platform_thread_lock_mutex(&globalLock);
        const size_t count = *pCount;
        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
        if (!swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(%" PRIu64 ") returned mismatching data on successive calls",
                        (uint64_t)(swapchain));
            }
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.mipLevels = 1;
            dev_data->imageMap[pSwapchainImages[i]].createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
        if (!swapchain_node->images.empty()) {
            for (auto image : swapchain_node->images) {
                // Add image object binding, then insert the new Mem Object and then bind it to created image
#if MTMERGESOURCE
                add_object_create_info(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                                       &swapchain_node->createInfo);
#endif
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return result;
}

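// Application-side sketch of the two-call pattern this hook tracks (names hypothetical):
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);          // query count only
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data()); // tracked above
//
// Each returned image starts in VK_IMAGE_LAYOUT_UNDEFINED as far as this layer's
// layout map is concerned.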
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            if (dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled) {
                dev_data->semaphoreMap[pPresentInfo->pWaitSemaphores[i]].signaled = 0;
            } else {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                            "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
                            (uint64_t)(queue), (uint64_t)(pPresentInfo->pWaitSemaphores[i]));
            }
        }
        VkDeviceMemory mem = VK_NULL_HANDLE;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, queue, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
#if MTMERGESOURCE
    if (pPresentInfo) {
        loader_platform_thread_lock_mutex(&globalLock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
            VkSemaphore sem = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(sem) != dev_data->semaphoreMap.end()) {
                dev_data->semaphoreMap[sem].state = MEMTRACK_SEMAPHORE_STATE_UNSET;
            }
        }
        loader_platform_thread_unlock_mutex(&globalLock);
    }
#endif
    return result;
}

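// Application-side sketch of the layout transition required before present (names
// hypothetical): transition the acquired image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR in
// the last command buffer submitted before vkQueuePresentKHR():
//
//     VkImageMemoryBarrier toPresent = {};
//     toPresent.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     toPresent.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     toPresent.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     toPresent.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     toPresent.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     toPresent.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     toPresent.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     toPresent.image = swapchainImage;
//     toPresent.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &toPresent);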
10633VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10634                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10635    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10636    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10637    bool skipCall = false;
10638#if MTMERGESOURCE
10639    loader_platform_thread_lock_mutex(&globalLock);
10640    if (semaphore != VK_NULL_HANDLE &&
10641        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10642        if (dev_data->semaphoreMap[semaphore].state != MEMTRACK_SEMAPHORE_STATE_UNSET) {
10643            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10644                               (uint64_t)semaphore, __LINE__, MEMTRACK_NONE, "SEMAPHORE",
10645                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10646        }
10647        dev_data->semaphoreMap[semaphore].state = MEMTRACK_SEMAPHORE_STATE_SIGNALLED;
10648    }
10649    auto fence_data = dev_data->fenceMap.find(fence);
10650    if (fence_data != dev_data->fenceMap.end()) {
10651        fence_data->second.swapchain = swapchain;
10652    }
10653    loader_platform_thread_unlock_mutex(&globalLock);
10654#endif
10655    if (!skipCall) {
10656        result =
10657            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10658    }
10659    loader_platform_thread_lock_mutex(&globalLock);
10660    // FIXME/TODO: Need to add some thing code the "fence" parameter
10661    dev_data->semaphoreMap[semaphore].signaled = 1;
10662    loader_platform_thread_unlock_mutex(&globalLock);
10663    return result;
10664}
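
// Example (illustrative only): a typical acquire that satisfies the checks above.
// The semaphore must be unsignaled going in and is tracked as signaled on return;
// handle variables below are hypothetical:
//     uint32_t imageIndex;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, imageAcquiredSemaphore,
//                           VK_NULL_HANDLE, &imageIndex);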

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        loader_platform_thread_lock_mutex(&globalLock);
        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
        loader_platform_thread_unlock_mutex(&globalLock);
    }
    return res;
}
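
// Example (illustrative only): the application-side registration that reaches the
// entry point above; myDebugCallback is a hypothetical PFN_vkDebugReportCallbackEXT:
//     VkDebugReportCallbackCreateInfoEXT info = {};
//     info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
//     info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
//     info.pfnCallback = myDebugCallback;
//     VkDebugReportCallbackEXT callback;
//     vkCreateDebugReportCallbackEXT(instance, &info, NULL, &callback);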

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                                                           VkDebugReportCallbackEXT msgCallback,
                                                                           const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    loader_platform_thread_lock_mutex(&globalLock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
    loader_platform_thread_unlock_mutex(&globalLock);
}
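
// Note: vkDestroyDebugReportCallbackEXT above forwards destruction down the chain
// first, then removes the callback from the layer's own logging state under the
// global lock.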

VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}
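
// vkGetDeviceProcAddr (below) resolves names in three stages: intercepted core
// entry points first, then WSI entry points when the swapchain extension is
// enabled on the device, and finally a pass-down to the next layer in the chain.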

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkDestroyDevice"))
        return (PFN_vkVoidFunction)vkDestroyDevice;
    if (!strcmp(funcName, "vkQueueSubmit"))
        return (PFN_vkVoidFunction)vkQueueSubmit;
    if (!strcmp(funcName, "vkWaitForFences"))
        return (PFN_vkVoidFunction)vkWaitForFences;
    if (!strcmp(funcName, "vkGetFenceStatus"))
        return (PFN_vkVoidFunction)vkGetFenceStatus;
    if (!strcmp(funcName, "vkQueueWaitIdle"))
        return (PFN_vkVoidFunction)vkQueueWaitIdle;
    if (!strcmp(funcName, "vkDeviceWaitIdle"))
        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
    if (!strcmp(funcName, "vkGetDeviceQueue"))
        return (PFN_vkVoidFunction)vkGetDeviceQueue;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
    if (!strcmp(funcName, "vkDestroyFence"))
        return (PFN_vkVoidFunction)vkDestroyFence;
    if (!strcmp(funcName, "vkResetFences"))
        return (PFN_vkVoidFunction)vkResetFences;
    if (!strcmp(funcName, "vkDestroySemaphore"))
        return (PFN_vkVoidFunction)vkDestroySemaphore;
    if (!strcmp(funcName, "vkDestroyEvent"))
        return (PFN_vkVoidFunction)vkDestroyEvent;
    if (!strcmp(funcName, "vkDestroyQueryPool"))
        return (PFN_vkVoidFunction)vkDestroyQueryPool;
    if (!strcmp(funcName, "vkDestroyBuffer"))
        return (PFN_vkVoidFunction)vkDestroyBuffer;
    if (!strcmp(funcName, "vkDestroyBufferView"))
        return (PFN_vkVoidFunction)vkDestroyBufferView;
    if (!strcmp(funcName, "vkDestroyImage"))
        return (PFN_vkVoidFunction)vkDestroyImage;
    if (!strcmp(funcName, "vkDestroyImageView"))
        return (PFN_vkVoidFunction)vkDestroyImageView;
    if (!strcmp(funcName, "vkDestroyShaderModule"))
        return (PFN_vkVoidFunction)vkDestroyShaderModule;
    if (!strcmp(funcName, "vkDestroyPipeline"))
        return (PFN_vkVoidFunction)vkDestroyPipeline;
    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
    if (!strcmp(funcName, "vkDestroySampler"))
        return (PFN_vkVoidFunction)vkDestroySampler;
    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
    if (!strcmp(funcName, "vkDestroyFramebuffer"))
        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
    if (!strcmp(funcName, "vkDestroyRenderPass"))
        return (PFN_vkVoidFunction)vkDestroyRenderPass;
    if (!strcmp(funcName, "vkCreateBuffer"))
        return (PFN_vkVoidFunction)vkCreateBuffer;
    if (!strcmp(funcName, "vkCreateBufferView"))
        return (PFN_vkVoidFunction)vkCreateBufferView;
    if (!strcmp(funcName, "vkCreateImage"))
        return (PFN_vkVoidFunction)vkCreateImage;
    if (!strcmp(funcName, "vkCreateImageView"))
        return (PFN_vkVoidFunction)vkCreateImageView;
    if (!strcmp(funcName, "vkCreateFence"))
        return (PFN_vkVoidFunction)vkCreateFence;
    if (!strcmp(funcName, "vkCreatePipelineCache"))
        return (PFN_vkVoidFunction)vkCreatePipelineCache;
    if (!strcmp(funcName, "vkDestroyPipelineCache"))
        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
    if (!strcmp(funcName, "vkGetPipelineCacheData"))
        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
    if (!strcmp(funcName, "vkMergePipelineCaches"))
        return (PFN_vkVoidFunction)vkMergePipelineCaches;
    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
    if (!strcmp(funcName, "vkCreateComputePipelines"))
        return (PFN_vkVoidFunction)vkCreateComputePipelines;
    if (!strcmp(funcName, "vkCreateSampler"))
        return (PFN_vkVoidFunction)vkCreateSampler;
    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
    if (!strcmp(funcName, "vkCreatePipelineLayout"))
        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
    if (!strcmp(funcName, "vkCreateDescriptorPool"))
        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
    if (!strcmp(funcName, "vkResetDescriptorPool"))
        return (PFN_vkVoidFunction)vkResetDescriptorPool;
    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
    if (!strcmp(funcName, "vkFreeDescriptorSets"))
        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
    if (!strcmp(funcName, "vkCreateCommandPool"))
        return (PFN_vkVoidFunction)vkCreateCommandPool;
    if (!strcmp(funcName, "vkDestroyCommandPool"))
        return (PFN_vkVoidFunction)vkDestroyCommandPool;
    if (!strcmp(funcName, "vkResetCommandPool"))
        return (PFN_vkVoidFunction)vkResetCommandPool;
    if (!strcmp(funcName, "vkCreateQueryPool"))
        return (PFN_vkVoidFunction)vkCreateQueryPool;
    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
    if (!strcmp(funcName, "vkFreeCommandBuffers"))
        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
    if (!strcmp(funcName, "vkBeginCommandBuffer"))
        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
    if (!strcmp(funcName, "vkEndCommandBuffer"))
        return (PFN_vkVoidFunction)vkEndCommandBuffer;
    if (!strcmp(funcName, "vkResetCommandBuffer"))
        return (PFN_vkVoidFunction)vkResetCommandBuffer;
    if (!strcmp(funcName, "vkCmdBindPipeline"))
        return (PFN_vkVoidFunction)vkCmdBindPipeline;
    if (!strcmp(funcName, "vkCmdSetViewport"))
        return (PFN_vkVoidFunction)vkCmdSetViewport;
    if (!strcmp(funcName, "vkCmdSetScissor"))
        return (PFN_vkVoidFunction)vkCmdSetScissor;
    if (!strcmp(funcName, "vkCmdSetLineWidth"))
        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
    if (!strcmp(funcName, "vkCmdSetDepthBias"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
    if (!strcmp(funcName, "vkCmdSetStencilReference"))
        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
    if (!strcmp(funcName, "vkCmdDraw"))
        return (PFN_vkVoidFunction)vkCmdDraw;
    if (!strcmp(funcName, "vkCmdDrawIndexed"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
    if (!strcmp(funcName, "vkCmdDrawIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
    if (!strcmp(funcName, "vkCmdDispatch"))
        return (PFN_vkVoidFunction)vkCmdDispatch;
    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
    if (!strcmp(funcName, "vkCmdCopyBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
    if (!strcmp(funcName, "vkCmdCopyImage"))
        return (PFN_vkVoidFunction)vkCmdCopyImage;
    if (!strcmp(funcName, "vkCmdBlitImage"))
        return (PFN_vkVoidFunction)vkCmdBlitImage;
    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
    if (!strcmp(funcName, "vkCmdFillBuffer"))
        return (PFN_vkVoidFunction)vkCmdFillBuffer;
    if (!strcmp(funcName, "vkCmdClearColorImage"))
        return (PFN_vkVoidFunction)vkCmdClearColorImage;
    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
    if (!strcmp(funcName, "vkCmdClearAttachments"))
        return (PFN_vkVoidFunction)vkCmdClearAttachments;
    if (!strcmp(funcName, "vkCmdResolveImage"))
        return (PFN_vkVoidFunction)vkCmdResolveImage;
    if (!strcmp(funcName, "vkCmdSetEvent"))
        return (PFN_vkVoidFunction)vkCmdSetEvent;
    if (!strcmp(funcName, "vkCmdResetEvent"))
        return (PFN_vkVoidFunction)vkCmdResetEvent;
    if (!strcmp(funcName, "vkCmdWaitEvents"))
        return (PFN_vkVoidFunction)vkCmdWaitEvents;
    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
    if (!strcmp(funcName, "vkCmdBeginQuery"))
        return (PFN_vkVoidFunction)vkCmdBeginQuery;
    if (!strcmp(funcName, "vkCmdEndQuery"))
        return (PFN_vkVoidFunction)vkCmdEndQuery;
    if (!strcmp(funcName, "vkCmdResetQueryPool"))
        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
    if (!strcmp(funcName, "vkCmdPushConstants"))
        return (PFN_vkVoidFunction)vkCmdPushConstants;
    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
    if (!strcmp(funcName, "vkCreateFramebuffer"))
        return (PFN_vkVoidFunction)vkCreateFramebuffer;
    if (!strcmp(funcName, "vkCreateShaderModule"))
        return (PFN_vkVoidFunction)vkCreateShaderModule;
    if (!strcmp(funcName, "vkCreateRenderPass"))
        return (PFN_vkVoidFunction)vkCreateRenderPass;
    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
    if (!strcmp(funcName, "vkCmdNextSubpass"))
        return (PFN_vkVoidFunction)vkCmdNextSubpass;
    if (!strcmp(funcName, "vkCmdEndRenderPass"))
        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
    if (!strcmp(funcName, "vkCmdExecuteCommands"))
        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
    if (!strcmp(funcName, "vkSetEvent"))
        return (PFN_vkVoidFunction)vkSetEvent;
    if (!strcmp(funcName, "vkMapMemory"))
        return (PFN_vkVoidFunction)vkMapMemory;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkUnmapMemory"))
        return (PFN_vkVoidFunction)vkUnmapMemory;
    if (!strcmp(funcName, "vkAllocateMemory"))
        return (PFN_vkVoidFunction)vkAllocateMemory;
    if (!strcmp(funcName, "vkFreeMemory"))
        return (PFN_vkVoidFunction)vkFreeMemory;
    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
    if (!strcmp(funcName, "vkBindBufferMemory"))
        return (PFN_vkVoidFunction)vkBindBufferMemory;
    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
#endif
    if (!strcmp(funcName, "vkGetQueryPoolResults"))
        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
    if (!strcmp(funcName, "vkBindImageMemory"))
        return (PFN_vkVoidFunction)vkBindImageMemory;
    if (!strcmp(funcName, "vkQueueBindSparse"))
        return (PFN_vkVoidFunction)vkQueueBindSparse;
    if (!strcmp(funcName, "vkCreateSemaphore"))
        return (PFN_vkVoidFunction)vkCreateSemaphore;
    if (!strcmp(funcName, "vkCreateEvent"))
        return (PFN_vkVoidFunction)vkCreateEvent;
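
    // Beyond this point a valid device handle is required: WSI entry points are
    // returned only when the swapchain extension was enabled at device creation,
    // and any remaining names are forwarded down the dispatch chain.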

    if (dev == NULL)
        return NULL;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    if (dev_data->device_extensions.wsi_enabled) {
        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
        if (!strcmp(funcName, "vkQueuePresentKHR"))
            return (PFN_vkVoidFunction)vkQueuePresentKHR;
    }

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}
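
// vkGetInstanceProcAddr (below) mirrors the device lookup: intercepted instance
// entry points first, then the debug_report entry points via the layer's report
// data, and finally a pass-down to the next layer in the instance chain.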

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
    if (!strcmp(funcName, "vkCreateInstance"))
        return (PFN_vkVoidFunction)vkCreateInstance;
    if (!strcmp(funcName, "vkCreateDevice"))
        return (PFN_vkVoidFunction)vkCreateDevice;
    if (!strcmp(funcName, "vkDestroyInstance"))
        return (PFN_vkVoidFunction)vkDestroyInstance;
#if MTMERGESOURCE
    if (!strcmp(funcName, "vkGetPhysicalDeviceMemoryProperties"))
        return (PFN_vkVoidFunction)vkGetPhysicalDeviceMemoryProperties;
#endif
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
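
    // Beyond this point a valid instance handle is required; debug_report entry
    // points are resolved through the layer's logging helpers before any
    // unrecognized name is forwarded down the instance dispatch chain.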

    if (instance == NULL)
        return NULL;

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    PFN_vkVoidFunction fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (fptr)
        return fptr;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}
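
// Example (illustrative only): an application resolves extension entry points
// through this path with the standard loader-exported lookup:
//     PFN_vkCreateDebugReportCallbackEXT pfnCreateDebugReportCallbackEXT =
//         (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(
//             instance, "vkCreateDebugReportCallbackEXT");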