core_validation.cpp revision fda02154007d70e5c078c29585c5e8fdd90ebc5f
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using namespace std;

// TODO : CB really needs its own class and files, so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
        lastBound[i].reset();
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

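// Check where this layer sits in the enabled-layer list: VK_LAYER_GOOGLE_unique_objects
// must be activated after (i.e., listed later than) this layer. The console warning below
// fires when unique_objects is the first enabled layer and this layer has not yet been
// seen while walking the list.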
template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
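
// Illustrative sketch (not part of the original source): shader_module below exposes
// begin()/end() returning these iterators, and operator*() yields the iterator itself,
// so a module can be walked with a range-based for:
//
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpEntryPoint) { /* inspect insn.word(n)... */ }
//     }
//
// This is the pattern used by build_def_index() and find_entrypoint() further down.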

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
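    /* Note: a SPIR-V module begins with a five-word header (magic number, version,
     * generator magic, id bound, schema), so the first instruction starts at word 5 --
     * hence words.begin() + 5 in begin() below. */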
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageMap.find(VkImage(handle));
        if (it != my_data->imageMap.end())
            return &(*it).second.mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferMap.find(VkBuffer(handle));
        if (it != my_data->bufferMap.end())
            return &(*it).second.mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        skipCall = validate_usage_flags(dev_data, image_node->second.createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const buffer_node = dev_data->bufferMap.find(buffer);
    if (buffer_node != dev_data->bufferMap.end()) {
        skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}
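
// Illustrative call site (a sketch with hypothetical variable names, showing how the
// helpers above are meant to be used by API entry points): a buffer used as a transfer
// source could be checked with
//     validate_buffer_usage_flags(dev_data, src_buffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
//                                 "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
// With strict == true the buffer must have been created with that exact usage bit set;
// with strict == false any overlap between actual and desired usage is accepted.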

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be guarded by the global lock
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    memcpy(&my_data->memObjMap[mem].allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    my_data->memObjMap[mem].allocInfo.pNext = NULL;
    my_data->memObjMap[mem].object = object;
    my_data->memObjMap[mem].mem = mem;
    my_data->memObjMap[mem].image = VK_NULL_HANDLE;
    my_data->memObjMap[mem].memRange.offset = 0;
    my_data->memObjMap[mem].memRange.size = 0;
    my_data->memObjMap[mem].pData = 0;
    my_data->memObjMap[mem].pDriverData = 0;
    my_data->memObjMap[mem].valid = false;
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end() && !image_node->second.valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end()) {
            image_node->second.valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}
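
// How the 'valid' bit above is used (a summary of this code, not new behavior):
// allocations start out with valid == false in add_mem_obj_info(); writers call
// set_memory_valid() to flip it, and validate_memory_is_valid() reports reads of memory
// whose contents were never written. Swapchain images bypass normal allocation and are
// keyed by MEMTRACKER_SWAP_CHAIN_IMAGE_KEY, so their bit lives on the image node instead.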

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to a particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when the GLOBAL_CB_NODE has not already been looked up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if (cmdBufRefCount != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For the NULL mem case, output an error (binding an object to NULL memory is invalid here)
// Otherwise, make sure the given object is in the global object map:
//  If a previous binding existed, output a validation error;
//  otherwise, add a reference from the object to the memory object and record the binding
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately: binding to NULL is reported as an error
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                    "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                           ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                    apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->objBindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = dev_data->imageMap.find(VkImage(handle));
                    if (image_node != dev_data->imageMap.end()) {
                        VkImageCreateInfo ici = image_node->second.createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding; else:
//  Make sure the given object is in its object map
//  If a previous binding existed, update the binding
//  Add a reference from the objectInfo to the memoryInfo
// Returns skipCall: true if a validation error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
        if (pInfo) {
            pInfo->objBindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skipCall;
}
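
// Note the contrast with set_mem_binding() above: sparse binding may legitimately replace
// an existing binding, so no rebind error is raised here, and binding to VK_NULL_HANDLE
// simply clears the previous binding rather than being reported as an error.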

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object 0x%" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.size() <= 0)
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.size() <= 0)
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.size() <= 0)
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
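
// Note on the word indices above: word 0 of every SPIR-V instruction packs the word count
// (high 16 bits) and the opcode (low 16 bits). OpType* instructions carry their result <id>
// in word 1, while constants, variables, and functions have a result type in word 1 and
// their result <id> in word 2 -- which is why the cases above record different words.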

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
            considering here, OR -- specialize on the fly now.
            */
        return 1;
    }

    return value.word(3);
}


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}
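
// A worked example of the relaxed rules above (illustrative only): with relaxed == true, a
// 4-component 32-bit float vector on the 'a' side matches a 3-component one on the 'b'
// side (the component-count check is a_insn.word(3) >= b_insn.word(3)), and an 'a' vector
// can also match a narrow numeric scalar 'b', since that case recurses on the vector's
// element type. With relaxed == false, component counts must match exactly.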

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
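
// Worked examples for the location math above (32-bit components unless noted):
//   vec4  -> (32 * 4 + 127) / 128 = 1 location
//   dvec3 -> (64 * 3 + 127) / 128 = 2 locations (64-bit components)
//   mat4  -> 4 columns * 1 location per vec4 column = 4 locations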

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}
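
// This mirrors the 128-bit rule in get_locations_consumed_by_type() above: three- and
// four-component 64-bit attribute formats overflow a single 128-bit location, so each
// consumes two consecutive locations.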

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {
        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}
static void collect_interface_block_members(shader_module const *src,
                                            std::map<location_t, interface_var> &out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        /* this isn't an interface block. */
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;

    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }
        }
    }

    /* Second pass -- produce the output, from Location decorations */
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v;
                    v.id = id;
                    /* TODO: member index in interface_var too? */
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    out[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}
1281
1282static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1283                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1284                                          bool is_array_of_verts) {
1285    std::unordered_map<unsigned, unsigned> var_locations;
1286    std::unordered_map<unsigned, unsigned> var_builtins;
1287    std::unordered_map<unsigned, unsigned> var_components;
1288    std::unordered_map<unsigned, unsigned> blocks;
1289    std::unordered_map<unsigned, unsigned> var_patch;
1290
1291    for (auto insn : *src) {
1292
1293        /* We consider two interface models: SSO rendezvous-by-location, and
1294         * builtins. Complain about anything that fits neither model.
1295         */
1296        if (insn.opcode() == spv::OpDecorate) {
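            /* OpDecorate operands: word(1) = target id, word(2) = decoration, word(3) = literal, if any. */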
1297            if (insn.word(2) == spv::DecorationLocation) {
1298                var_locations[insn.word(1)] = insn.word(3);
1299            }
1300
1301            if (insn.word(2) == spv::DecorationBuiltIn) {
1302                var_builtins[insn.word(1)] = insn.word(3);
1303            }
1304
1305            if (insn.word(2) == spv::DecorationComponent) {
1306                var_components[insn.word(1)] = insn.word(3);
1307            }
1308
1309            if (insn.word(2) == spv::DecorationBlock) {
1310                blocks[insn.word(1)] = 1;
1311            }
1312
1313            if (insn.word(2) == spv::DecorationPatch) {
1314                var_patch[insn.word(1)] = 1;
1315            }
1316        }
1317    }
1318
1319    /* TODO: handle grouped decorations */
1320    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1321     * have the same location, and we DON'T want to clobber. */
1322
1323    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1324       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1325       the word to determine which word contains the terminator. */
1326    uint32_t word = 3;
1327    while (entrypoint.word(word) & 0xff000000u) {
1328        ++word;
1329    }
1330    ++word;
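    /* e.g. for the common entrypoint name "main": word 3 packs 'm','a','i','n' (its top byte, 'n',
       is nonzero), word 4 is the all-zero padding word containing the terminator, so the interface
       ids begin at word 5. */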
1331
1332    for (; word < entrypoint.len(); word++) {
1333        auto insn = src->get_def(entrypoint.word(word));
1334        assert(insn != src->end());
1335        assert(insn.opcode() == spv::OpVariable);
1336
1337        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1338            unsigned id = insn.word(2);
1339            unsigned type = insn.word(1);
1340
1341            int location = value_or_default(var_locations, id, -1);
1342            int builtin = value_or_default(var_builtins, id, -1);
1343            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK: defaults to 0 */
1344            bool is_patch = var_patch.find(id) != var_patch.end();
1345
1346            /* All variables and interface block members in the Input or Output storage classes
1347             * must be decorated with either a builtin or an explicit location.
1348             *
1349             * TODO: integrate the interface block support here. For now, don't complain --
1350             * a valid SPIRV module will only hit this path for the interface block case, as the
1351             * individual members of the type are decorated, rather than variable declarations.
1352             */
1353
1354            if (location != -1) {
1355                /* A user-defined interface variable, with a location. Where a variable
1356                 * occupies multiple locations, emit one result for each. */
1357                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1358                for (unsigned int offset = 0; offset < num_locations; offset++) {
1359                    interface_var v;
1360                    v.id = id;
1361                    v.type_id = type;
1362                    v.offset = offset;
1363                    v.is_patch = is_patch;
1364                    v.is_block_member = false;
1365                    out[std::make_pair(location + offset, component)] = v;
1366                }
1367            } else if (builtin == -1) {
1368                /* An interface block instance */
1369                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
1370            }
1371        }
1372    }
1373}
1374
1375static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
1376                                                 std::unordered_set<uint32_t> const &accessible_ids,
1377                                                 std::map<descriptor_slot_t, interface_var> &out) {
1378
1379    std::unordered_map<unsigned, unsigned> var_sets;
1380    std::unordered_map<unsigned, unsigned> var_bindings;
1381
1382    for (auto insn : *src) {
1383        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1384         * DecorationDescriptorSet and DecorationBinding.
1385         */
1386        if (insn.opcode() == spv::OpDecorate) {
1387            if (insn.word(2) == spv::DecorationDescriptorSet) {
1388                var_sets[insn.word(1)] = insn.word(3);
1389            }
1390
1391            if (insn.word(2) == spv::DecorationBinding) {
1392                var_bindings[insn.word(1)] = insn.word(3);
1393            }
1394        }
1395    }
1396
1397    for (auto id : accessible_ids) {
1398        auto insn = src->get_def(id);
1399        assert(insn != src->end());
1400
1401        if (insn.opcode() == spv::OpVariable &&
1402            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1403            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1404            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1405
1406            auto existing_it = out.find(std::make_pair(set, binding));
1407            if (existing_it != out.end()) {
1408                /* conflict within spv image */
1409                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1410                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1411                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1412                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1413                        existing_it->first.second);
1414            }
1415
1416            interface_var v;
1417            v.id = insn.word(2);
1418            v.type_id = insn.word(1);
1419            v.offset = 0;
1420            v.is_patch = false;
1421            v.is_block_member = false;
1422            out[std::make_pair(set, binding)] = v;
1423        }
1424    }
1425}
1426
1427static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1428                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1429                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1430                                              shader_stage_attributes const *consumer_stage) {
1431    std::map<location_t, interface_var> outputs;
1432    std::map<location_t, interface_var> inputs;
1433
1434    bool pass = true;
1435
1436    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1437    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1438
1439    auto a_it = outputs.begin();
1440    auto b_it = inputs.begin();
1441
1442    /* maps sorted by key (location); walk them together to find mismatches */
1443    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1444        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1445        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1446        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1447        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1448
1449        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1450            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1451                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1452                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1453                        a_first.second, consumer_stage->name)) {
1454                pass = false;
1455            }
1456            a_it++;
1457        } else if (a_at_end || a_first > b_first) {
1458            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1459                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1460                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1461                        producer_stage->name)) {
1462                pass = false;
1463            }
1464            b_it++;
1465        } else {
1466            // subtleties of arrayed interfaces:
1467            // - if is_patch, then the member is not arrayed, even though the interface may be.
1468            // - if is_block_member, then the extra array level of an arrayed interface is not
1469            //   expressed in the member type -- it's expressed in the block type.
1470            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1471                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1472                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1473                             true)) {
1474                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1475                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1476                            a_first.first, a_first.second,
1477                            describe_type(producer, a_it->second.type_id).c_str(),
1478                            describe_type(consumer, b_it->second.type_id).c_str())) {
1479                    pass = false;
1480                }
1481            }
1482            if (a_it->second.is_patch != b_it->second.is_patch) {
1483                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1484                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1485                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1486                            "per-%s in %s stage", a_first.first, a_first.second,
1487                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1488                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1489                    pass = false;
1490                }
1491            }
1492            a_it++;
1493            b_it++;
1494        }
1495    }
1496
1497    return pass;
1498}
1499
1500enum FORMAT_TYPE {
1501    FORMAT_TYPE_UNDEFINED,
1502    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1503    FORMAT_TYPE_SINT,
1504    FORMAT_TYPE_UINT,
1505};
1506
1507static unsigned get_format_type(VkFormat fmt) {
1508    switch (fmt) {
1509    case VK_FORMAT_UNDEFINED:
1510        return FORMAT_TYPE_UNDEFINED;
1511    case VK_FORMAT_R8_SINT:
1512    case VK_FORMAT_R8G8_SINT:
1513    case VK_FORMAT_R8G8B8_SINT:
1514    case VK_FORMAT_R8G8B8A8_SINT:
1515    case VK_FORMAT_R16_SINT:
1516    case VK_FORMAT_R16G16_SINT:
1517    case VK_FORMAT_R16G16B16_SINT:
1518    case VK_FORMAT_R16G16B16A16_SINT:
1519    case VK_FORMAT_R32_SINT:
1520    case VK_FORMAT_R32G32_SINT:
1521    case VK_FORMAT_R32G32B32_SINT:
1522    case VK_FORMAT_R32G32B32A32_SINT:
1523    case VK_FORMAT_R64_SINT:
1524    case VK_FORMAT_R64G64_SINT:
1525    case VK_FORMAT_R64G64B64_SINT:
1526    case VK_FORMAT_R64G64B64A64_SINT:
1527    case VK_FORMAT_B8G8R8_SINT:
1528    case VK_FORMAT_B8G8R8A8_SINT:
1529    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1530    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1531    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1532        return FORMAT_TYPE_SINT;
1533    case VK_FORMAT_R8_UINT:
1534    case VK_FORMAT_R8G8_UINT:
1535    case VK_FORMAT_R8G8B8_UINT:
1536    case VK_FORMAT_R8G8B8A8_UINT:
1537    case VK_FORMAT_R16_UINT:
1538    case VK_FORMAT_R16G16_UINT:
1539    case VK_FORMAT_R16G16B16_UINT:
1540    case VK_FORMAT_R16G16B16A16_UINT:
1541    case VK_FORMAT_R32_UINT:
1542    case VK_FORMAT_R32G32_UINT:
1543    case VK_FORMAT_R32G32B32_UINT:
1544    case VK_FORMAT_R32G32B32A32_UINT:
1545    case VK_FORMAT_R64_UINT:
1546    case VK_FORMAT_R64G64_UINT:
1547    case VK_FORMAT_R64G64B64_UINT:
1548    case VK_FORMAT_R64G64B64A64_UINT:
1549    case VK_FORMAT_B8G8R8_UINT:
1550    case VK_FORMAT_B8G8R8A8_UINT:
1551    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1552    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1553    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1554        return FORMAT_TYPE_UINT;
1555    default:
1556        return FORMAT_TYPE_FLOAT;
1557    }
1558}
1559
1560/* characterizes a SPIR-V type appearing in an interface to a fixed-function stage,
1561 * for comparison to a VkFormat's characterization above. */
1562static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1563    auto insn = src->get_def(type);
1564    assert(insn != src->end());
1565
1566    switch (insn.opcode()) {
1567    case spv::OpTypeInt:
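        /* OpTypeInt's word(3) is the signedness operand: 1 = signed, 0 = unsigned. */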
1568        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1569    case spv::OpTypeFloat:
1570        return FORMAT_TYPE_FLOAT;
1571    case spv::OpTypeVector:
1572        return get_fundamental_type(src, insn.word(2));
1573    case spv::OpTypeMatrix:
1574        return get_fundamental_type(src, insn.word(2));
1575    case spv::OpTypeArray:
1576        return get_fundamental_type(src, insn.word(2));
1577    case spv::OpTypePointer:
1578        return get_fundamental_type(src, insn.word(3));
1579    default:
1580        return FORMAT_TYPE_UNDEFINED;
1581    }
1582}
1583
1584static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
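    /* u_ffs returns the 1-based index of the lowest set bit, so subtracting one maps a
     * single-bit VkShaderStageFlagBits value to a zero-based stage index. */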
1585    uint32_t bit_pos = u_ffs(stage);
1586    return bit_pos - 1;
1587}
1588
1589static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1590    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1591     * each binding should be specified only once.
1592     */
1593    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1594    bool pass = true;
1595
1596    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1597        auto desc = &vi->pVertexBindingDescriptions[i];
1598        auto &binding = bindings[desc->binding];
1599        if (binding) {
1600            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1601                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1602                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1603                pass = false;
1604            }
1605        } else {
1606            binding = desc;
1607        }
1608    }
1609
1610    return pass;
1611}
1612
1613static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1614                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1615    std::map<location_t, interface_var> inputs;
1616    bool pass = true;
1617
1618    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);
1619
1620    /* Build index by location */
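    /* An attribute with a wide format (e.g. a 64-bit multi-component format) can consume more
     * than one location; register its description under every location it covers. */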
1621    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1622    if (vi) {
1623        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1624            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1625            for (auto j = 0u; j < num_locations; j++) {
1626                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1627            }
1628        }
1629    }
1630
1631    auto it_a = attribs.begin();
1632    auto it_b = inputs.begin();
1633
1634    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1635        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1636        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1637        auto a_first = a_at_end ? 0 : it_a->first;
1638        auto b_first = b_at_end ? 0 : it_b->first.first;
1639        if (!a_at_end && (b_at_end || a_first < b_first)) {
1640            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1641                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1642                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1643                pass = false;
1644            }
1645            it_a++;
1646        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1647            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1648                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1649                        b_first)) {
1650                pass = false;
1651            }
1652            it_b++;
1653        } else {
1654            unsigned attrib_type = get_format_type(it_a->second->format);
1655            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1656
1657            /* type checking */
1658            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1659                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1660                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1661                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1662                            string_VkFormat(it_a->second->format), a_first,
1663                            describe_type(vs, it_b->second.type_id).c_str())) {
1664                    pass = false;
1665                }
1666            }
1667
1668            /* OK! */
1669            it_a++;
1670            it_b++;
1671        }
1672    }
1673
1674    return pass;
1675}
1676
1677static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1678                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1679    std::map<location_t, interface_var> outputs;
1680    std::map<uint32_t, VkFormat> color_attachments;
1681    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1682        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1683            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1684        }
1685    }
1686
1687    bool pass = true;
1688
1689    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1690
1691    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);
1692
1693    auto it_a = outputs.begin();
1694    auto it_b = color_attachments.begin();
1695
1696    /* Walk attachment list and outputs together */
1697
1698    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1699        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1700        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1701
1702        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1703            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1704                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1705                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1706                pass = false;
1707            }
1708            it_a++;
1709        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1710            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1711                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1712                pass = false;
1713            }
1714            it_b++;
1715        } else {
1716            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1717            unsigned att_type = get_format_type(it_b->second);
1718
1719            /* type checking */
1720            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1721                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1722                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1723                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1724                            string_VkFormat(it_b->second),
1725                            describe_type(fs, it_a->second.type_id).c_str())) {
1726                    pass = false;
1727                }
1728            }
1729
1730            /* OK! */
1731            it_a++;
1732            it_b++;
1733        }
1734    }
1735
1736    return pass;
1737}
1738
1739/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1740 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1741 * for example.
1742 * Note: we only explore parts of the SPIR-V image which might actually contain ids we care about for the above analyses.
1743 *  - NOT the shader input/output interfaces.
1744 *
1745 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1746 * converting parts of this to be generated from the machine-readable spec instead.
1747 */
1748static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
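    /* Seed the worklist with the entrypoint's function id (OpEntryPoint's word(2)), then chase
     * every id transitively referenced by the function bodies we encounter. */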
1749    std::unordered_set<uint32_t> worklist;
1750    worklist.insert(entrypoint.word(2));
1751
1752    while (!worklist.empty()) {
1753        auto id_iter = worklist.begin();
1754        auto id = *id_iter;
1755        worklist.erase(id_iter);
1756
1757        auto insn = src->get_def(id);
1758        if (insn == src->end()) {
1759            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1760             * across all kinds of things here that we may not care about. */
1761            continue;
1762        }
1763
1764        /* try to add to the output set */
1765        if (!ids.insert(id).second) {
1766            continue; /* if we already saw this id, we don't want to walk it again. */
1767        }
1768
1769        switch (insn.opcode()) {
1770        case spv::OpFunction:
1771            /* scan whole body of the function, enlisting anything interesting */
1772            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1773                switch (insn.opcode()) {
1774                case spv::OpLoad:
1775                case spv::OpAtomicLoad:
1776                case spv::OpAtomicExchange:
1777                case spv::OpAtomicCompareExchange:
1778                case spv::OpAtomicCompareExchangeWeak:
1779                case spv::OpAtomicIIncrement:
1780                case spv::OpAtomicIDecrement:
1781                case spv::OpAtomicIAdd:
1782                case spv::OpAtomicISub:
1783                case spv::OpAtomicSMin:
1784                case spv::OpAtomicUMin:
1785                case spv::OpAtomicSMax:
1786                case spv::OpAtomicUMax:
1787                case spv::OpAtomicAnd:
1788                case spv::OpAtomicOr:
1789                case spv::OpAtomicXor:
1790                    worklist.insert(insn.word(3)); /* ptr */
1791                    break;
1792                case spv::OpStore:
1793                case spv::OpAtomicStore:
1794                    worklist.insert(insn.word(1)); /* ptr */
1795                    break;
1796                case spv::OpAccessChain:
1797                case spv::OpInBoundsAccessChain:
1798                    worklist.insert(insn.word(3)); /* base ptr */
1799                    break;
1800                case spv::OpSampledImage:
1801                case spv::OpImageSampleImplicitLod:
1802                case spv::OpImageSampleExplicitLod:
1803                case spv::OpImageSampleDrefImplicitLod:
1804                case spv::OpImageSampleDrefExplicitLod:
1805                case spv::OpImageSampleProjImplicitLod:
1806                case spv::OpImageSampleProjExplicitLod:
1807                case spv::OpImageSampleProjDrefImplicitLod:
1808                case spv::OpImageSampleProjDrefExplicitLod:
1809                case spv::OpImageFetch:
1810                case spv::OpImageGather:
1811                case spv::OpImageDrefGather:
1812                case spv::OpImageRead:
1813                case spv::OpImage:
1814                case spv::OpImageQueryFormat:
1815                case spv::OpImageQueryOrder:
1816                case spv::OpImageQuerySizeLod:
1817                case spv::OpImageQuerySize:
1818                case spv::OpImageQueryLod:
1819                case spv::OpImageQueryLevels:
1820                case spv::OpImageQuerySamples:
1821                case spv::OpImageSparseSampleImplicitLod:
1822                case spv::OpImageSparseSampleExplicitLod:
1823                case spv::OpImageSparseSampleDrefImplicitLod:
1824                case spv::OpImageSparseSampleDrefExplicitLod:
1825                case spv::OpImageSparseSampleProjImplicitLod:
1826                case spv::OpImageSparseSampleProjExplicitLod:
1827                case spv::OpImageSparseSampleProjDrefImplicitLod:
1828                case spv::OpImageSparseSampleProjDrefExplicitLod:
1829                case spv::OpImageSparseFetch:
1830                case spv::OpImageSparseGather:
1831                case spv::OpImageSparseDrefGather:
1832                case spv::OpImageTexelPointer:
1833                    worklist.insert(insn.word(3)); /* image or sampled image */
1834                    break;
1835                case spv::OpImageWrite:
1836                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1837                    break;
1838                case spv::OpFunctionCall:
1839                    for (uint32_t i = 3; i < insn.len(); i++) {
1840                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1841                    }
1842                    break;
1843
1844                case spv::OpExtInst:
1845                    for (uint32_t i = 5; i < insn.len(); i++) {
1846                        worklist.insert(insn.word(i)); /* operands to ext inst */
1847                    }
1848                    break;
1849                }
1850            }
1851            break;
1852        }
1853    }
1854}
1855
1856static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1857                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1858                                                          shader_module const *src, spirv_inst_iter type,
1859                                                          VkShaderStageFlagBits stage) {
1860    bool pass = true;
1861
1862    /* strip off ptrs etc */
1863    type = get_struct_type(src, type, false);
1864    assert(type != src->end());
1865
1866    /* validate directly off the offsets. this isn't quite correct for arrays
1867     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1868     * sizes */
1869    for (auto insn : *src) {
1870        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1871
1872            if (insn.word(3) == spv::DecorationOffset) {
1873                unsigned offset = insn.word(4);
1874                auto size = 4; /* bytes; TODO: calculate this based on the type */
1875
1876                bool found_range = false;
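                /* The member is covered only if some declared range fully contains
                 * [offset, offset + size); e.g. a member at offset 16 (with the assumed
                 * size of 4) needs a range with offset <= 16 and offset + size >= 20. */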
1877                for (auto const &range : *pushConstantRanges) {
1878                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1879                        found_range = true;
1880
1881                        if ((range.stageFlags & stage) == 0) {
1882                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1883                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1884                                        "Push constant range covering variable starting at "
1885                                        "offset %u not accessible from stage %s",
1886                                        offset, string_VkShaderStageFlagBits(stage))) {
1887                                pass = false;
1888                            }
1889                        }
1890
1891                        break;
1892                    }
1893                }
1894
1895                if (!found_range) {
1896                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1897                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1898                                "Push constant range covering variable starting at "
1899                                "offset %u not declared in layout",
1900                                offset)) {
1901                        pass = false;
1902                    }
1903                }
1904            }
1905        }
1906    }
1907
1908    return pass;
1909}
1910
1911static bool validate_push_constant_usage(debug_report_data *report_data,
1912                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
1913                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
1914    bool pass = true;
1915
1916    for (auto id : accessible_ids) {
1917        auto def_insn = src->get_def(id);
1918        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1919            pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src,
1920                                                                 src->get_def(def_insn.word(1)), stage);
1921        }
1922    }
1923
1924    return pass;
1925}
1926
1927// For given pipelineLayout verify that the set_layout_node at slot.first
1928//  has the requested binding at slot.second and return ptr to that binding
1929static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) {
1930
1931    if (!pipelineLayout)
1932        return nullptr;
1933
1934    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
1935        return nullptr;
1936
1937    return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
1938}
1939
1940// Block of code at start here for managing/tracking Pipeline state that this layer cares about
1941
1942static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
1943
1944// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer's lastBound
1945//   Then we need to synchronize accesses based on cmd buffer so that if one thread is reading state on a cmd buffer, updates
1946//   to that same cmd buffer by a separate thread do not change the state out from under us
1947// Track the last cmd buffer touched by this thread
1948
1949static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
1950    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
1951        if (pCB->drawCount[i])
1952            return true;
1953    }
1954    return false;
1955}
1956
1957// Check object status for selected flag state
1958static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
1959                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
1960    if (!(pNode->status & status_mask)) {
1961        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1962                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
1963                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
1964    }
1965    return false;
1966}
1967
1968// Retrieve pipeline node ptr for given pipeline object
1969static PIPELINE_NODE *getPipeline(layer_data const *my_data, const VkPipeline pipeline) {
1970    auto it = my_data->pipelineMap.find(pipeline);
1971    if (it == my_data->pipelineMap.end()) {
1972        return nullptr;
1973    }
1974    return it->second;
1975}
1976
1977// Return true if for a given PSO, the given state enum is dynamic, else return false
1978static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
1979    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
1980        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
1981            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
1982                return true;
1983        }
1984    }
1985    return false;
1986}
1987
1988// Validate state stored as flags at time of draw call
1989static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
1990    bool result;
1991    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
1992                             "Dynamic viewport state not set for this command buffer");
1993    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
1994                              "Dynamic scissor state not set for this command buffer");
1995    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
1996        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
1997         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
1998        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1999                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2000    }
2001    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2002        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2003        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2004                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2005    }
2006    if (pPipe->blendConstantsEnabled) {
2007        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2008                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2009    }
2010    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2011        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2012        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2013                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2014    }
2015    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2016        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2017        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2018                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2019        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2020                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2021        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2022                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2023    }
2024    if (indexedDraw) {
2025        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2026                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2027                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2028    }
2029    return result;
2030}
2031
2032// Verify attachment reference compatibility according to spec
2033//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
2034//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2035//   to make sure that the format and sample counts match.
2036//  If not, they are not compatible.
2037static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2038                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2039                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2040                                             const VkAttachmentDescription *pSecondaryAttachments) {
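    // Callers iterate index up to max(primaryCount, secondaryCount), so an index that is out of
    // range for one array is always in range for the other.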
2041    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2042        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2043            return true;
2044    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2045        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2046            return true;
2047    } else { // format and sample count must match
2048        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2049             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2050            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2051             pSecondaryAttachments[pSecondary[index].attachment].samples))
2052            return true;
2053    }
2054    // Format or sample count didn't match
2055    return false;
2056}
2057
2058// For the given primary and secondary RenderPass objects, verify that they're compatible
2059static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2060                                            string &errorMsg) {
2061    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2062        stringstream errorStr;
2063        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2064        errorMsg = errorStr.str();
2065        return false;
2066    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2067        stringstream errorStr;
2068        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2069        errorMsg = errorStr.str();
2070        return false;
2071    }
2072    // Trivial pass case is exact same RP
2073    if (primaryRP == secondaryRP) {
2074        return true;
2075    }
2076    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2077    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2078    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2079        stringstream errorStr;
2080        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2081                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2082        errorMsg = errorStr.str();
2083        return false;
2084    }
2085    uint32_t spIndex = 0;
2086    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2087        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2088        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2089        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2090        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2091        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2092            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2093                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2094                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2095                stringstream errorStr;
2096                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2097                errorMsg = errorStr.str();
2098                return false;
2099            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2100                                                         primaryColorCount, primaryRPCI->pAttachments,
2101                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2102                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2103                stringstream errorStr;
2104                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2105                errorMsg = errorStr.str();
2106                return false;
2107            }
2108        }
2109
2110        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2111                                              1, primaryRPCI->pAttachments,
2112                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2113                                              1, secondaryRPCI->pAttachments)) {
2114            stringstream errorStr;
2115            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2116            errorMsg = errorStr.str();
2117            return false;
2118        }
2119
2120        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2121        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2122        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2123        for (uint32_t i = 0; i < inputMax; ++i) {
2124            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2125                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2126                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2127                stringstream errorStr;
2128                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2129                errorMsg = errorStr.str();
2130                return false;
2131            }
2132        }
2133    }
2134    return true;
2135}
2136
2137// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2138// pipelineLayout[layoutIndex]
2139static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2140                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
2141    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2142    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2143        stringstream errorStr;
2144        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2145        errorMsg = errorStr.str();
2146        return false;
2147    }
2148    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2149        stringstream errorStr;
2150        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2151                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2152                 << ", but you're attempting to bind a set to index " << layoutIndex;
2153        errorMsg = errorStr.str();
2154        return false;
2155    }
2156    auto layout_node = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2157    return pSet->IsCompatible(layout_node, &errorMsg);
2158}
2159
2160// Validate that data for each specialization entry is fully contained within the buffer.
2161static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2162    bool pass = true;
2163
2164    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2165
2166    if (spec) {
2167        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2168            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2169                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2170                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2171                            "Specialization entry %u (for constant id %u) references memory outside provided "
2172                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2173                            " bytes provided)",
2174                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2175                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2176
2177                    pass = false;
2178                }
2179            }
2180        }
2181    }
2182
2183    return pass;
2184}
2185
2186static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2187                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2188    auto type = module->get_def(type_id);
2189
2190    descriptor_count = 1;
2191
2192    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2193     * descriptor count for each dimension. */
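    /* OpTypeArray operands: word(2) = element type, word(3) = id of the length constant.
     * OpTypePointer operands: word(3) = pointee type. */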
2194    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2195        if (type.opcode() == spv::OpTypeArray) {
2196            descriptor_count *= get_constant_value(module, type.word(3));
2197            type = module->get_def(type.word(2));
2198        }
2199        else {
2200            type = module->get_def(type.word(3));
2201        }
2202    }
2203
2204    switch (type.opcode()) {
2205    case spv::OpTypeStruct: {
2206        for (auto insn : *module) {
2207            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2208                if (insn.word(2) == spv::DecorationBlock) {
2209                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2210                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2211                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2212                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2213                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2214                }
2215            }
2216        }
2217
2218        /* Invalid */
2219        return false;
2220    }
2221
2222    case spv::OpTypeSampler:
2223        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2224
2225    case spv::OpTypeSampledImage:
2226        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2227            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2228             * doesn't really have a sampler, and a texel buffer descriptor
2229             * doesn't really provide one. Allow this slight mismatch.
2230             */
2231            auto image_type = module->get_def(type.word(2));
2232            auto dim = image_type.word(3);
2233            auto sampled = image_type.word(7);
2234            return dim == spv::DimBuffer && sampled == 1;
2235        }
2236        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2237
2238    case spv::OpTypeImage: {
2239        /* Many descriptor types can back an image type -- which one depends on the
2240         * dimension and on whether the image will be used with a sampler. SPIR-V for
2241         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2242         * runtime (sampled == 0) is not allowed.
2243         */
2244        auto dim = type.word(3);
2245        auto sampled = type.word(7);
2246
2247        if (dim == spv::DimSubpassData) {
2248            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2249        } else if (dim == spv::DimBuffer) {
2250            if (sampled == 1) {
2251                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2252            } else {
2253                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2254            }
2255        } else if (sampled == 1) {
2256            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2257        } else {
2258            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2259        }
2260    }
2261
2262    /* We shouldn't really see any other junk types -- but if we do, they're
2263     * a mismatch.
2264     */
2265    default:
2266        return false; /* Mismatch */
2267    }
2268}
2269
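// Flag a missing physical-device feature. Following the convention used throughout this layer,
// validation only fails when log_msg() returns true (i.e. the message was reported as an
// actionable error).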
2270static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2271    if (!feature) {
2272        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2273                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2274                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2275                    "enabled on the device",
2276                    feature_name)) {
2277            return false;
2278        }
2279    }
2280
2281    return true;
2282}
2283
2284static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2285                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2286    bool pass = true;
2287
2288
2289    for (auto insn : *src) {
2290        if (insn.opcode() == spv::OpCapability) {
2291            switch (insn.word(1)) {
2292            case spv::CapabilityMatrix:
2293            case spv::CapabilityShader:
2294            case spv::CapabilityInputAttachment:
2295            case spv::CapabilitySampled1D:
2296            case spv::CapabilityImage1D:
2297            case spv::CapabilitySampledBuffer:
2298            case spv::CapabilityImageBuffer:
2299            case spv::CapabilityImageQuery:
2300            case spv::CapabilityDerivativeControl:
2301                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2302                break;
2303
2304            case spv::CapabilityGeometry:
2305                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2306                break;
2307
2308            case spv::CapabilityTessellation:
2309                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2310                break;
2311
2312            case spv::CapabilityFloat64:
2313                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2314                break;
2315
2316            case spv::CapabilityInt64:
2317                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2318                break;
2319
2320            case spv::CapabilityTessellationPointSize:
2321            case spv::CapabilityGeometryPointSize:
2322                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2323                                        "shaderTessellationAndGeometryPointSize");
2324                break;
2325
2326            case spv::CapabilityImageGatherExtended:
2327                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2328                break;
2329
2330            case spv::CapabilityStorageImageMultisample:
2331                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2332                break;
2333
2334            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2335                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2336                                        "shaderUniformBufferArrayDynamicIndexing");
2337                break;
2338
2339            case spv::CapabilitySampledImageArrayDynamicIndexing:
2340                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2341                                        "shaderSampledImageArrayDynamicIndexing");
2342                break;
2343
2344            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2345                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2346                                        "shaderStorageBufferArrayDynamicIndexing");
2347                break;
2348
2349            case spv::CapabilityStorageImageArrayDynamicIndexing:
2350                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2351                                        "shaderStorageImageArrayDynamicIndexing");
2352                break;
2353
2354            case spv::CapabilityClipDistance:
2355                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2356                break;
2357
2358            case spv::CapabilityCullDistance:
2359                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2360                break;
2361
2362            case spv::CapabilityImageCubeArray:
2363                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2364                break;
2365
2366            case spv::CapabilitySampleRateShading:
2367                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2368                break;
2369
2370            case spv::CapabilitySparseResidency:
2371                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2372                break;
2373
2374            case spv::CapabilityMinLod:
2375                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2376                break;
2377
2378            case spv::CapabilitySampledCubeArray:
2379                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2380                break;
2381
2382            case spv::CapabilityImageMSArray:
2383                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2384                break;
2385
2386            case spv::CapabilityStorageImageExtendedFormats:
2387                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2388                                        "shaderStorageImageExtendedFormats");
2389                break;
2390
2391            case spv::CapabilityInterpolationFunction:
2392                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2393                break;
2394
2395            case spv::CapabilityStorageImageReadWithoutFormat:
2396                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2397                                        "shaderStorageImageReadWithoutFormat");
2398                break;
2399
2400            case spv::CapabilityStorageImageWriteWithoutFormat:
2401                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2402                                        "shaderStorageImageWriteWithoutFormat");
2403                break;
2404
2405            case spv::CapabilityMultiViewport:
2406                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2407                break;
2408
2409            default:
2410                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2411                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2412                            "Shader declares capability %u, not supported in Vulkan.",
2413                            insn.word(1)))
2414                    pass = false;
2415                break;
2416            }
2417        }
2418    }
2419
2420    return pass;
2421}
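
// For reference: require_feature(), used throughout the capability switch above,
// is defined earlier in this file. A minimal sketch of its assumed shape (the
// error-enum name below is illustrative, not authoritative):
//
//     static bool require_feature(debug_report_data *report_data, VkBool32 feature,
//                                 char const *feature_name) {
//         if (!feature) {
//             if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0),
//                         0, __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
//                         "Shader requires VkPhysicalDeviceFeatures::%s but it is not enabled on the device",
//                         feature_name))
//                 return false;
//         }
//         return true;
//     }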
2422
2423static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2424                                           VkPipelineShaderStageCreateInfo const *pStage,
2425                                           PIPELINE_NODE *pipeline,
2426                                           shader_module **out_module,
2427                                           spirv_inst_iter *out_entrypoint,
2428                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2429                                           std::unordered_map<VkShaderModule,
2430                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
    bool pass = true;
    // Guard the module lookup: an unknown VkShaderModule handle is reported by
    // object-tracking layers, but dereferencing an end() iterator here would
    // crash this layer, so bail out defensively.
    auto module_it = shaderModuleMap.find(pStage->module);
    if (module_it == shaderModuleMap.end()) {
        return false;
    }
    auto module = *out_module = module_it->second.get();
    pass &= validate_specialization_offsets(report_data, pStage);
2435
2436    /* find the entrypoint */
2437    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2438    if (entrypoint == module->end()) {
2439        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2440                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2441                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2442                    string_VkShaderStageFlagBits(pStage->stage))) {
2443            pass = false;
2444        }
2445    }
2446
2447    /* validate shader capabilities against enabled device features */
2448    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2449
2450    /* mark accessible ids */
2451    std::unordered_set<uint32_t> accessible_ids;
2452    mark_accessible_ids(module, entrypoint, accessible_ids);
2453
2454    /* validate descriptor set layout against what the entrypoint actually uses */
2455    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2456    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
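    // Note: each key in descriptor_uses is a descriptor_slot_t, i.e. a
    // (set, binding) pair -- so in the loop below, use.first.first is the
    // descriptor set index and use.first.second is the binding within that set.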
2457
2458    auto pipelineLayout = pipeline->pipelineLayout;
2459
2460    /* validate push constant usage */
2461    pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges,
2462                                        module, accessible_ids, pStage->stage);
2463
2464    /* validate descriptor use */
2465    for (auto use : descriptor_uses) {
2466        // While validating shaders capture which slots are used by the pipeline
2467        pipeline->active_slots[use.first.first].insert(use.first.second);
2468
2469        /* verify given pipelineLayout has requested setLayout with requested binding */
2470        const auto & binding = get_descriptor_binding(pipelineLayout, use.first);
2471        unsigned required_descriptor_count;
2472
2473        if (!binding) {
2474            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2475                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2476                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2477                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2478                pass = false;
2479            }
2480        } else if (~binding->stageFlags & pStage->stage) {
2481            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2482                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2483                        "Shader uses descriptor slot %u.%u (used "
2484                        "as type `%s`) but descriptor not "
2485                        "accessible from stage %s",
2486                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2487                        string_VkShaderStageFlagBits(pStage->stage))) {
2488                pass = false;
2489            }
2490        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2491                                          /*out*/ required_descriptor_count)) {
2492            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2493                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2494                                                                       "%u.%u (used as type `%s`) but "
2495                                                                       "descriptor of type %s",
2496                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2497                        string_VkDescriptorType(binding->descriptorType))) {
2498                pass = false;
2499            }
2500        } else if (binding->descriptorCount < required_descriptor_count) {
2501            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2502                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2503                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2504                        required_descriptor_count, use.first.first, use.first.second,
2505                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2506                pass = false;
2507            }
2508        }
2509    }
2510
2511    return pass;
2512}
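
// Example of the kind of mismatch the descriptor-use loop above catches
// (illustrative only): a shader declaring `layout(set=0, binding=1) uniform
// sampler2D s;` while the pipeline layout declares binding (0,1) as
// VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER is flagged as a descriptor type mismatch.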
2513
2514
// Validate the shaders used by the given pipeline, and capture the descriptor
//  slots that are actually used by those shaders into pPipeline->active_slots
2517static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2518                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2519                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2520    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2521    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2522    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2523
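    // One slot per graphics stage (vertex, tess control, tess eval, geometry,
    // fragment), indexed by get_shader_stage_id()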
2524    shader_module *shaders[5];
2525    memset(shaders, 0, sizeof(shaders));
2526    spirv_inst_iter entrypoints[5];
2527    memset(entrypoints, 0, sizeof(entrypoints));
2528    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2529    bool pass = true;
2530
2531    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2532        auto pStage = &pCreateInfo->pStages[i];
2533        auto stage_id = get_shader_stage_id(pStage->stage);
2534        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2535                                               &shaders[stage_id], &entrypoints[stage_id],
2536                                               enabledFeatures, shaderModuleMap);
2537    }
2538
2539    vi = pCreateInfo->pVertexInputState;
2540
2541    if (vi) {
2542        pass &= validate_vi_consistency(report_data, vi);
2543    }
2544
2545    if (shaders[vertex_stage]) {
2546        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2547    }
2548
2549    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2550    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2551
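    // Walk the stage chain: skip unpopulated stages to find the first real
    // producer, then validate each populated downstream stage's inputs against
    // that producer's outputs. E.g. with only VS and FS bound, the VS/FS
    // interface is checked directly, skipping the tessellation/geometry slots.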
2552    while (!shaders[producer] && producer != fragment_stage) {
2553        producer++;
2554        consumer++;
2555    }
2556
2557    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2558        assert(shaders[producer]);
2559        if (shaders[consumer]) {
2560            pass &= validate_interface_between_stages(report_data,
2561                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2562                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2563
2564            producer = consumer;
2565        }
2566    }
2567
2568    if (shaders[fragment_stage] && pPipeline->renderPass) {
2569        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2570                                                        pPipeline->renderPass, pCreateInfo->subpass);
2571    }
2572
2573    return pass;
2574}
2575
2576static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2577                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2578    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2579
2580    shader_module *module;
2581    spirv_inst_iter entrypoint;
2582
2583    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2584                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2585}
2586
2587// Return Set node ptr for specified set or else NULL
static cvdescriptorset::DescriptorSet *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
    // Single lookup instead of find-then-operator[], which searched the map twice
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}
2594// For the given command buffer, verify and update the state for activeSetBindingsPairs
2595//  This includes:
2596//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2597//     To be valid, the dynamic offset combined with the offset and range from its
2598//     descriptor update must not overflow the size of its buffer being updated
2599//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2600//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
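//  Each tuple in activeSetBindingsPairs is (bound descriptor set, bindings of
//  that set actually used by the pipeline, dynamic offsets bound for that set).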
2601static bool validate_and_update_drawtime_descriptor_state(
2602    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2603    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>,
2604                            std::vector<uint32_t> const *>> &activeSetBindingsPairs) {
2605    bool result = false;
2606    for (auto set_bindings_pair : activeSetBindingsPairs) {
2607        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2608        std::string err_str;
2609        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2610                                         &err_str)) {
2611            // Report error here
2612            auto set = set_node->GetSet();
2613            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2614                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2615                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s",
2616                              reinterpret_cast<const uint64_t &>(set), err_str.c_str());
2617        }
2618        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2619    }
2620    return result;
2621}
2622// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
2623//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
2624//   function can be killed and validate_and_update_draw_state() used instead
2625static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
2626    // For the bound descriptor sets, pull off any storage images and buffers
2627    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
2628    //  pipelines
2629    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
2630        set->GetAllStorageUpdates(&pCB->updateBuffers, &pCB->updateImages);
2631    }
2632}
2633
// For given pipeline, return number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if MSAA is disabled
2635static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2636    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2637        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2638        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2639    }
2640    return VK_SAMPLE_COUNT_1_BIT;
2641}
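
// For example, a pipeline created with pMultisampleState->rasterizationSamples
// set to VK_SAMPLE_COUNT_4_BIT returns that value here, and it is later checked
// against the sample counts of the subpass attachments it renders to.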
2642
2643// Validate draw-time state related to the PSO
2644static bool validatePipelineDrawtimeState(layer_data const *my_data, const GLOBAL_CB_NODE *pCB,
2645                                          const VkPipelineBindPoint pipelineBindPoint, PIPELINE_NODE const *pPipeline) {
2646    bool skip_call = false;
2647    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
2648        // Verify that any MSAA request in PSO matches sample# in bound FB
2649        // Skip the check if rasterization is disabled.
2650        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2651            (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2652            VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2653            if (pCB->activeRenderPass) {
2654                const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
2655                const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2656                VkSampleCountFlagBits subpass_num_samples = VkSampleCountFlagBits(0);
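                // subpass_num_samples acts as a small state machine: 0 means "no
                // attachment samples recorded yet", -1 means "attachments disagree
                // on sample count", which can never match the PSO and so forces
                // the mismatch error below.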
2657                uint32_t i;
2658
2659                const VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2660                if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2661                    (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2662                    skip_call |=
2663                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2664                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2665                                "Render pass subpass %u mismatch with blending state defined and blend state attachment "
2666                                "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
2667                                "must be the same at draw-time.",
2668                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2669                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2670                }
2671
2672                for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2673                    VkSampleCountFlagBits samples;
2674
2675                    if (subpass_desc->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
2676                        continue;
2677
2678                    samples = render_pass_info->pAttachments[subpass_desc->pColorAttachments[i].attachment].samples;
2679                    if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0)) {
2680                        subpass_num_samples = samples;
2681                    } else if (subpass_num_samples != samples) {
2682                        subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2683                        break;
2684                    }
2685                }
2686                if ((subpass_desc->pDepthStencilAttachment != NULL) &&
2687                    (subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
2688                    const VkSampleCountFlagBits samples =
2689                        render_pass_info->pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples;
2690                    if (subpass_num_samples == static_cast<VkSampleCountFlagBits>(0))
2691                        subpass_num_samples = samples;
2692                    else if (subpass_num_samples != samples)
2693                        subpass_num_samples = static_cast<VkSampleCountFlagBits>(-1);
2694                }
2695
2696                if (((subpass_desc->colorAttachmentCount > 0) || (subpass_desc->pDepthStencilAttachment != NULL)) &&
2697                    (pso_num_samples != subpass_num_samples)) {
2698                    skip_call |=
2699                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2700                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2701                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
2702                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
2703                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2704                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2705                }
2706            } else {
2707                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2708                                     reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2709                                     "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
2710                                     reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2711            }
2712        }
2713        // TODO : Add more checks here
2714    } else {
2715        // TODO : Validate non-gfx pipeline updates
2716    }
2717    return skip_call;
2718}
2719
2720// Validate overall state at the time of a draw call
2721static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2722                                           const VkPipelineBindPoint bindPoint) {
2723    bool result = false;
2724    auto const &state = pCB->lastBound[bindPoint];
2725    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return unconditionally: every check below dereferences pPipe, so
        // continuing would crash the layer even when the message above is filtered.
        return result;
    }
2735    // First check flag states
2736    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2737        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2738    else {
2739        // First block of code below to validate active sets should eventually
2740        //  work for the compute case but currently doesn't so return early for now
2741        // TODO : When active sets in compute shaders are correctly parsed,
2742        //  stop returning early here and handle them in top block below
2743        return result;
2744    }
2745
2746    // Now complete other state checks
2747    // TODO : When Compute shaders are properly parsed, fix this section to validate them as well
2748    if (state.pipelineLayout) {
2749        string errorString;
2750        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2751        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
2752        for (auto & setBindingPair : pPipe->active_slots) {
2753            uint32_t setIndex = setBindingPair.first;
2754            // If valid set is not bound throw an error
2755            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2756                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2757                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2758                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
2759                                  setIndex);
2760            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex],
2761                                                        pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
2762                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2763                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2764                result |=
2765                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2766                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2767                            "VkDescriptorSet (0x%" PRIxLEAST64
2768                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2769                            (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
2770            } else { // Valid set is bound and layout compatible, validate that it's updated
2771                // Pull the set node
2772                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
2773                // Save vector of all active sets to verify dynamicOffsets below
2774                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
2775                                                                 &state.dynamicOffsets[setIndex]));
2776                // Make sure set has been updated if it has no immutable samplers
2777                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2778                if (!pSet->IsUpdated()) {
2779                    for (auto binding : setBindingPair.second) {
2780                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
2781                            result |= log_msg(
2782                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2783                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2784                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2785                                "this will result in undefined behavior.",
2786                                (uint64_t)pSet->GetSet());
2787                        }
2788                    }
2789                }
2790            }
2791        }
2792        // For given active slots, verify any dynamic descriptors and record updated images & buffers
2793        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2794    }
    // TODO : If/when compute pipelines/shaders are handled above, code below is only for gfx bind point
2796    //if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
2797    // Verify Vtx binding
2798    if (pPipe->vertexBindingDescriptions.size() > 0) {
2799        for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
2800            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2801                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2802                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2803                                  "The Pipeline State Object (0x%" PRIxLEAST64
2804                                  ") expects that this Command Buffer's vertex binding Index " PRINTF_SIZE_T_SPECIFIER
2805                                  " should be set via vkCmdBindVertexBuffers.",
2806                                  (uint64_t)state.pipeline, i);
2807            }
2808        }
2809    } else {
2810        if (!pCB->currentDrawData.buffers.empty()) {
2811            result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2812                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2813                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2814                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2815                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2816        }
2817    }
2818    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2819    // Skip check if rasterization is disabled or there is no viewport.
2820    if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
2821         (pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2822        pPipe->graphicsPipelineCI.pViewportState) {
2823        bool dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
2824        bool dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
2825        if (dynViewport) {
2826            if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
2827                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2828                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2829                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2830                                  ", but PSO viewportCount is %u. These counts must match.",
2831                                  pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
2832            }
2833        }
2834        if (dynScissor) {
2835            if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
2836                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2837                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2838                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2839                                  ", but PSO scissorCount is %u. These counts must match.",
2840                                  pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
2841            }
2842        }
2843    }
2844    //} // end of "if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {" block
2845
2846    // Check general pipeline state that needs to be validated at drawtime
2847    result |= validatePipelineDrawtimeState(my_data, pCB, bindPoint, pPipe);
2848
2849    return result;
2850}
2851
2852// Validate HW line width capabilities prior to setting requested line width.
2853static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
2854    bool skip_call = false;
2855
2856    // First check to see if the physical device supports wide lines.
2857    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
2858        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
2859                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
2860                                            "not supported/enabled so lineWidth must be 1.0f!",
2861                             lineWidth);
2862    } else {
2863        // Otherwise, make sure the width falls in the valid range.
2864        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
2865            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
2866            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
2867                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
2868                                                          "to between [%f, %f]!",
2869                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
2870                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
2871        }
2872    }
2873
2874    return skip_call;
2875}
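
// Illustrative example (values are hypothetical): with wideLines disabled,
// setting lineWidth to 2.0f trips the first check; with wideLines enabled on a
// device whose lineWidthRange is [1.0, 8.0], a width of 16.0f trips the range check.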
2876
2877// Verify that create state for a pipeline is valid
2878static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
2879                                      int pipelineIndex) {
2880    bool skipCall = false;
2881
2882    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
2883
2884    // If create derivative bit is set, check that we've specified a base
2885    // pipeline correctly, and that the base pipeline was created to allow
2886    // derivatives.
2887    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
2888        PIPELINE_NODE *pBasePipeline = nullptr;
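        // Exactly one of basePipelineHandle / basePipelineIndex may identify the
        // base pipeline: the XOR below is false (and an error is raised) when
        // both are specified or neither is specified.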
2889        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
2890              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
2891            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2892                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2893                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
2894        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
2895            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
2896                skipCall |=
2897                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2898                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2899                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
2900            } else {
2901                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
2902            }
2903        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
2904            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
2905        }
2906
2907        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
2908            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2909                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2910                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
2911        }
2912    }
2913
2914    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
2915        if (!my_data->phys_dev_properties.features.independentBlend) {
2916            if (pPipeline->attachments.size() > 1) {
2917                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
2918                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
2919                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
2920                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
2921                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
2922                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
2923                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
2924                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
2925                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
2926                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
2927                        skipCall |=
2928                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2929                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
2930                            "enabled, all elements of pAttachments must be identical");
2931                    }
2932                }
2933            }
2934        }
2935        if (!my_data->phys_dev_properties.features.logicOp &&
2936            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
2937            skipCall |=
2938                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2939                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
2940                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
2941        }
2942        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
2943            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
2944             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
2945            skipCall |=
2946                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2947                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
2948                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
2949        }
2950    }
2951
2952    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
2953    // produces nonsense errors that confuse users. Other layers should already
2954    // emit errors for renderpass being invalid.
2955    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
2956    if (rp_data != my_data->renderPassMap.end() &&
2957        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
2958        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2959                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
2960                                                                           "is out of range for this renderpass (0..%u)",
2961                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
2962    }
2963
2964    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
2965                                                    my_data->shaderModuleMap)) {
2966        skipCall = true;
2967    }
2968    // Each shader's stage must be unique
2969    if (pPipeline->duplicate_shaders) {
2970        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
2971            if (pPipeline->duplicate_shaders & stage) {
2972                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2973                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2974                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
2975                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
2976            }
2977        }
2978    }
2979    // VS is required
2980    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
2981        skipCall |=
2982            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2983                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
2984    }
2985    // Either both or neither TC/TE shaders should be defined
2986    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
2987        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
2988        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2989                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2990                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
2991    }
2992    // Compute shaders should be specified independent of Gfx shaders
2993    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
2994        (pPipeline->active_shaders &
2995         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
2996          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
2997        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2998                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
2999                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3000    }
3001    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3002    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3003    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3004        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3005         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3006        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3007                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3008                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3009                                                                           "topology for tessellation pipelines");
3010    }
3011    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3012        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3013        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3014            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3015                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3016                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3017                                                                               "topology is only valid for tessellation pipelines");
3018        }
3019        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3020            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3021                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3022                                "Invalid Pipeline CreateInfo State: "
3023                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3024                                "topology used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
                    my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology used with patchControlPoints value %u."
                                                                               " patchControlPoints must be >0 and <= "
                                                                               "maxTessellationPatchSize (%u).",
                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
                                my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize);
3033        }
3034    }
3035    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3036    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3037        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3038            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3039                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3040        }
3041    }
3042    // Viewport state must be included if rasterization is enabled.
3043    // If the viewport state is included, the viewport and scissor counts should always match.
3044    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3045    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3046        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3047        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3048            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3049                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3050                                                                           "and scissors are dynamic PSO must include "
3051                                                                           "viewportCount and scissorCount in pViewportState.");
3052        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3053                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3054            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3055                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3056                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3057                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3058                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3059        } else {
3060            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3061            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3062            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3063            if (!dynViewport) {
3064                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3065                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3066                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3067                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3068                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3069                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3070                                        "vkCmdSetViewport().",
3071                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3072                }
3073            }
3074            if (!dynScissor) {
3075                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3076                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3077                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3078                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3079                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3080                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3081                                        "vkCmdSetScissor().",
3082                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3083                }
3084            }
3085        }
3086    }
3087    return skipCall;
3088}
3089
3090// Free the Pipeline nodes
3091static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
3094    for (auto &pipe_map_pair : my_data->pipelineMap) {
3095        delete pipe_map_pair.second;
3096    }
3097    my_data->pipelineMap.clear();
3098}
3099
3100// Block of code at start here specifically for managing/tracking DSs
3101
3102// Return Pool node ptr for specified pool or else NULL
static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
    // Single lookup instead of find-then-operator[], which searched the map twice
    auto pool_it = my_data->descriptorPoolMap.find(pool);
    if (pool_it == my_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}
3109
3110// Return false if update struct is of valid type, otherwise flag error and return code from callback
3111static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3112    switch (pUpdateStruct->sType) {
3113    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3114    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3115        return false;
3116    default:
3117        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3118                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3119                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3120                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3121    }
3122}
3123
// Return the descriptor count for the given update struct
3125static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3126    switch (pUpdateStruct->sType) {
3127    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3128        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3129    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3130        // TODO : Need to understand this case better and make sure code is correct
3131        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3132    default:
3133        return 0;
3134    }
3135}
3136
3137// For given layout and update, return the first overall index of the layout that is updated
3138static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3139                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3140    return binding_start_index + arrayIndex;
3141}
3142// For given layout and update, return the last overall index of the layout that is updated
3143static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3144                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3145    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3146    return binding_start_index + arrayIndex + count - 1;
3147}
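// Worked example for the two index helpers above: for a binding whose first
// overall index is 4, an update with arrayIndex 1 and descriptorCount 3 touches
// overall indices 5 through 7 (start = 4 + 1 = 5, end = 4 + 1 + 3 - 1 = 7).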
3148// Verify that the descriptor type in the update struct matches what's expected by the layout
3149static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3150                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3151    // First get actual type of update
3152    bool skipCall = false;
3153    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3154    switch (pUpdateStruct->sType) {
3155    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3156        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3157        break;
3158    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3159        /* no need to validate */
        return false;
3162    default:
3163        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3164                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3165                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3166                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3167    }
3168    if (!skipCall) {
3169        if (layout_type != actualType) {
3170            skipCall |= log_msg(
3171                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3172                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3173                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3174                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3175        }
3176    }
3177    return skipCall;
3178}
3179//TODO: Consolidate functions
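// Per-aspect lookup helper: callers seed `node` with VK_IMAGE_LAYOUT_MAX_ENUM
// sentinels and call this once per aspect bit (see the cmd-buffer-level
// FindLayout below); conflicting layouts across aspects are reported as errors.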
3180bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3181    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3182    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3183        return false;
3184    }
3185    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3186    imgpair.subresource.aspectMask = aspectMask;
3187    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3188    if (imgsubIt == pCB->imageLayoutMap.end()) {
3189        return false;
3190    }
3191    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3192        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3193                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3194                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3195                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3196    }
3197    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3198        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3199                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3200                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3201                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3202    }
3203    node = imgsubIt->second;
3204    return true;
3205}
3206
3207bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3208    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3209        return false;
3210    }
3211    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3212    imgpair.subresource.aspectMask = aspectMask;
3213    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3214    if (imgsubIt == my_data->imageLayoutMap.end()) {
3215        return false;
3216    }
3217    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3218        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3219                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3220                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3221                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3222    }
3223    layout = imgsubIt->second.layout;
3224    return true;
3225}
3226
3227// find layout(s) on the cmd buf level
3228bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3229    ImageSubresourcePair imgpair = {image, true, range};
3230    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3231    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3232    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3233    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3234    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3235    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3236        imgpair = {image, false, VkImageSubresource()};
3237        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3238        if (imgsubIt == pCB->imageLayoutMap.end())
3239            return false;
3240        node = imgsubIt->second;
3241    }
3242    return true;
3243}
3244
3245// find layout(s) on the global level
3246bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3247    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3248    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3249    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3250    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3251    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3252    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3253        imgpair = {imgpair.image, false, VkImageSubresource()};
3254        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3255        if (imgsubIt == my_data->imageLayoutMap.end())
3256            return false;
3257        layout = imgsubIt->second.layout;
3258    }
3259    return true;
3260}
3261
3262bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3263    ImageSubresourcePair imgpair = {image, true, range};
3264    return FindLayout(my_data, imgpair, layout);
3265}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto imgIt = my_data->imageMap.find(image);
    if (imgIt == my_data->imageMap.end())
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. For now, potential errors in
    // that case are simply ignored.
    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}
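
// Illustrative sketch (hypothetical helper, not wired into the layer): a caller
// such as the QueuePresentKHR hook could use FindLayouts() to confirm that every
// tracked layout of a swapchain image is presentable before it reaches the
// presentation engine.
static bool SketchImageIsPresentable(const layer_data *my_data, VkImage image) {
    std::vector<VkImageLayout> layouts;
    if (!FindLayouts(my_data, image, layouts))
        return true; // Nothing tracked for this image, so nothing to flag
    for (auto layout : layouts) {
        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR)
            return false; // Some subresource is still in a non-presentable layout
    }
    return true;
}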

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    // The pair carries no subresource, so dispatch straight to the concrete
    // whole-image overloads above; no per-aspect fan-out is needed here.
    SetLayout(pObject, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto image_view_data = dev_data->imageViewMap.find(imageView);
    assert(image_view_data != dev_data->imageViewMap.end());
    const VkImage &image = image_view_data->second.image;
    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            SetLayout(pCB, image, sub, layout);
        }
    }
}
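
// Worked example of the expansion above (comment only): a view with
// baseMipLevel = 1, levelCount = 2, baseArrayLayer = 0, layerCount = 3 records
// six per-subresource layouts, one for each (mip, layer) pair:
// (1,0) (1,1) (1,2) (2,0) (2,1) (2,2).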

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
    bool skip_call = false;
    auto set_node = my_data->setMap.find(set);
    if (set_node == my_data->setMap.end()) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
                             (uint64_t)(set));
    } else {
        if (set_node->second->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
                                 func_str.c_str(), (uint64_t)(set));
        }
    }
    return skip_call;
}
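
// Illustrative sketch (hypothetical helper, not the layer's actual hook): a
// vkFreeDescriptorSets() implementation would typically run this check over
// every set being freed before letting the call proceed down the chain.
static bool SketchValidateSetsIdle(const layer_data *my_data, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    bool skip = false;
    for (uint32_t i = 0; i < count; ++i) {
        skip |= validateIdleDescriptorSet(my_data, pDescriptorSets[i], "vkFreeDescriptorSets");
    }
    return skip;
}
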
// update DS mappings based on write and copy update arrays
static bool dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
    bool skip_call = false;
    // Validate Write updates
    uint32_t i = 0;
    for (i = 0; i < descriptorWriteCount; i++) {
        auto dest_set = pWDS[i].dstSet;
        auto set_pair = my_data->setMap.find(dest_set);
        if (set_pair == my_data->setMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        reinterpret_cast<uint64_t &>(dest_set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                        "Cannot call vkUpdateDescriptorSets() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.",
                        reinterpret_cast<uint64_t &>(dest_set));
        } else {
            string error_str;
            if (!set_pair->second->WriteUpdate(my_data->report_data, &pWDS[i], &error_str)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            reinterpret_cast<uint64_t &>(dest_set), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                            "vkUpdateDescriptorSets() failed write update for Descriptor Set 0x%" PRIx64 " with error: %s",
                            reinterpret_cast<uint64_t &>(dest_set), error_str.c_str());
            }
        }
    }
    // Now validate copy updates
    for (i = 0; i < descriptorCopyCount; ++i) {
        auto dst_set = pCDS[i].dstSet;
        auto src_set = pCDS[i].srcSet;
        auto src_pair = my_data->setMap.find(src_set);
        auto dst_pair = my_data->setMap.find(dst_set);
        if (src_pair == my_data->setMap.end()) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                reinterpret_cast<uint64_t &>(src_set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                "Cannot call vkUpdateDescriptorSets() to copy from descriptor set 0x%" PRIxLEAST64 " that has not been allocated.",
                reinterpret_cast<uint64_t &>(src_set));
        } else if (dst_pair == my_data->setMap.end()) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                reinterpret_cast<uint64_t &>(dst_set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                "Cannot call vkUpdateDescriptorSets() to copy to descriptor set 0x%" PRIxLEAST64 " that has not been allocated.",
                reinterpret_cast<uint64_t &>(dst_set));
        } else {
            std::string error_str;
            if (!dst_pair->second->CopyUpdate(my_data->report_data, &pCDS[i], src_pair->second, &error_str)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            reinterpret_cast<uint64_t &>(dst_set), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
                            "vkUpdateDescriptorSets() failed copy update from Descriptor Set 0x%" PRIx64
                            " to Descriptor Set 0x%" PRIx64 " with error: %s",
                            reinterpret_cast<uint64_t &>(src_set), reinterpret_cast<uint64_t &>(dst_set), error_str.c_str());
            }
        }
    }
    return skip_call;
}
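
// Illustrative sketch (placeholder handles, never called by the layer): the shape
// of a single uniform-buffer write that dsUpdate() hands to WriteUpdate() above.
static bool SketchSingleBufferWrite(layer_data *my_data, VkDevice device, VkDescriptorSet set, VkBuffer buffer) {
    VkDescriptorBufferInfo buffer_info = {};
    buffer_info.buffer = buffer;
    buffer_info.offset = 0;
    buffer_info.range = VK_WHOLE_SIZE;
    VkWriteDescriptorSet write = {};
    write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet = set;
    write.dstBinding = 0; // assumes binding 0 is a UNIFORM_BUFFER in the set's layout
    write.descriptorCount = 1;
    write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    write.pBufferInfo = &buffer_info;
    return dsUpdate(my_data, device, 1, &write, 0, nullptr);
}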

// Verify that given pool has descriptors that are being requested for allocation.
// NOTE : Calls to this function should be wrapped in mutex
static bool validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
                                                     const VkDescriptorSetLayout *pSetLayouts) {
    bool skipCall = false;

    // Track number of descriptorSets allowable in this pool
    if (pPoolNode->availableSets < count) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
                            "Unable to allocate %u descriptorSets from pool 0x%" PRIxLEAST64
                            ". This pool only has %d descriptorSets remaining.",
                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
    } else {
        pPoolNode->availableSets -= count;
    }

    for (uint32_t i = 0; i < count; ++i) {
        auto layout_pair = dev_data->descriptorSetLayoutMap.find(pSetLayouts[i]);
        if (layout_pair == dev_data->descriptorSetLayoutMap.end()) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                        "Unable to find set layout node for layout 0x%" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                        (uint64_t)pSetLayouts[i]);
        } else {
            auto &layout_node = layout_pair->second;
            for (uint32_t j = 0; j < layout_node->GetBindingCount(); ++j) {
                const auto &binding_layout = layout_node->GetDescriptorSetLayoutBindingPtrFromIndex(j);
                uint32_t typeIndex = static_cast<uint32_t>(binding_layout->descriptorType);
                uint32_t poolSizeCount = binding_layout->descriptorCount;
                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
                        reinterpret_cast<const uint64_t &>(pSetLayouts[i]), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
                        "Unable to allocate %u descriptors of type %s from pool 0x%" PRIxLEAST64
                        ". This pool only has %d descriptors of this type remaining.",
                        poolSizeCount, string_VkDescriptorType(binding_layout->descriptorType), (uint64_t)pPoolNode->pool,
                        pPoolNode->availableDescriptorTypeCount[typeIndex]);
                } else { // Decrement available descriptors of this type
                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
                }
            }
        }
    }
    return skipCall;
}
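
// Worked example of the accounting above (comment only): a pool created with
// maxSets = 4 and 8 UNIFORM_BUFFER descriptors satisfies two allocations of a
// layout containing 3 uniform buffers (8 - 3 - 3 leaves 2 of that type), but a
// third identical allocation trips the DRAWSTATE_DESCRIPTOR_POOL_EMPTY path
// because only 2 uniform-buffer descriptors remain.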

// Free the descriptor set, remove it from setMap and invalidate any cmd buffers that it was bound to
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
    if (!pPool) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
    } else {
        // TODO: validate flags
        // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
        for (auto ds : pPool->sets) {
            freeDescriptorSet(my_data, ds);
        }
        pPool->sets.clear();
        // Reset available count for each type and available sets for this pool
        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
        }
        pPool->availableSets = pPool->maxSets;
    }
}

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skipCall = false;
    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
    if (pool_data != my_data->commandPoolMap.end()) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // Init cmd node and append it to the end of the command list
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skipCall;
}
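
// Illustrative sketch (hypothetical helper, not the layer's actual vkCmdDraw
// hook): a draw entry point records CMD_DRAW through addCmd(), which routes it
// through checkGraphicsBit() using the queue-family flags of the CB's pool.
static bool SketchValidateDraw(layer_data *my_data, VkCommandBuffer commandBuffer) {
    bool skip = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, commandBuffer);
    if (pCB) {
        skip |= addCmd(my_data, pCB, CMD_DRAW, "vkCmdDraw()");
    }
    return skip;
}
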
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewports.clear();
        pCB->scissors.clear();

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
                set->RemoveBoundCommandBuffer(pCB);
            }
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->lastSubmittedFence = VK_NULL_HANDLE;
        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
        pCB->destroyedSets.clear();
        pCB->updatedSets.clear();
        pCB->destroyedFramebuffers.clear();
        pCB->waitedEvents.clear();
        pCB->semaphores.clear();
        pCB->events.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();

        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fbNode = dev_data->frameBufferMap.find(framebuffer);
            if (fbNode != dev_data->frameBufferMap.end()) {
                fbNode->second.referencingCmdBuffers.erase(pCB->commandBuffer);
            }
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status = CBSTATUS_ALL;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_VIEWPORT:
                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
                break;
            case VK_DYNAMIC_STATE_SCISSOR:
                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
                break;
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}
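
// Worked example of the mask logic above (sketch only, never called): a PSO that
// declares viewport and scissor as its only dynamic states clears exactly those
// two bits, so the command buffer must still see vkCmdSetViewport() and
// vkCmdSetScissor() before a draw is considered fully specified.
static CBStatusFlags SketchViewportScissorDynamicMask() {
    CBStatusFlags mask = CBSTATUS_ALL;
    mask &= ~CBSTATUS_VIEWPORT_SET; // dynamic: must be set on the command buffer
    mask &= ~CBSTATUS_SCISSOR_SET;  // dynamic: must be set on the command buffer
    return mask; // everything except viewport/scissor counts as already set
}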

// Print the last bound Gfx Pipeline
static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    bool skipCall = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (pPipeTrav) { // Otherwise there is nothing to print
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                vk_print_vkgraphicspipelinecreateinfo(
                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                    .c_str());
        }
    }
    return skipCall;
}

static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && pCB->cmds.size() > 0) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
        vector<CMD_NODE> cmds = pCB->cmds;
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cb as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    }
    // Otherwise nothing to print
}

static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    bool skipCall = false;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skipCall;
    }
    skipCall |= printPipeline(my_data, cb);
    return skipCall;
}

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}
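
// Illustrative sketch (hypothetical pairing, not actual entry points): transfer
// commands must be recorded outside a render pass while draws must be inside one,
// so the corresponding hooks lean on the two checks above.
static bool SketchRenderPassScopeChecks(const layer_data *my_data, GLOBAL_CB_NODE *pCB) {
    bool skip = false;
    skip |= insideRenderPass(my_data, pCB, "vkCmdCopyBuffer"); // must NOT be inside a render pass
    skip |= outsideRenderPass(my_data, pCB, "vkCmdDraw");      // must be inside a render pass
    return skip;
}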

static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance = *pInstance;
    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data =
        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                     pCreateInfo->ppEnabledExtensionNames);

    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (my_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
        my_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(my_data->report_data);
    delete my_data->instance_dispatch_table;
    layer_data_map.erase(key);
}

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;

    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    // Setup device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
    uint32_t count;
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device mem limits into device layer_data struct
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// prototype
static void deleteRenderPasses(layer_data *);
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    std::unique_lock<std::mutex> lock(global_lock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    // This will also delete all sets in the pool & remove them from setMap
    deletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
        delete del_layout.second;
    }
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    lock.unlock();
#if MTMERGESOURCE
    bool skipCall = false;
    lock.lock();
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data);
    printCBList(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (!dev_data->memObjMap.empty()) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = &(*ii).second;
            if (pInfo->allocInfo.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
                            "MEM", "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
                                   "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    if (!skipCall) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// Validate that the initial layout recorded in the command buffer for each IMAGE
// matches the globally tracked IMAGE layout
static bool ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
                        "with layout %s when first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
                        "first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                }
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}
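
// Worked example of the check above (comment only): if a command buffer's first
// use of image X assumes SHADER_READ_ONLY_OPTIMAL, but the globally tracked
// layout of X at submit time is still TRANSFER_DST_OPTIMAL (e.g., a staging copy
// was never followed by a barrier), the mismatch is reported; the global layout
// is then advanced to the command buffer's final layout for X.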

// Track which resources are in-flight by atomically incrementing their "in_use" count
static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data == my_data->bufferMap.end()) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
            } else {
                buffer_data->second.in_use.fetch_add(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            if (!my_data->setMap.count(set->GetSet())) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set));
            } else {
                set->in_use.fetch_add(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode == my_data->semaphoreMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                        "Cannot submit cmd buffer using deleted semaphore 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(semaphore));
        } else {
            semaphoreNode->second.in_use.fetch_add(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode == my_data->eventMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
        } else {
            eventNode->second.in_use.fetch_add(1);
        }
    }
    return skip_call;
}
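
// Note on the lifecycle (invariant only, no new behavior): every fetch_add(1)
// performed above must be balanced by the matching fetch_sub(1) in
// decrementResources() below once the fence guarding the submission signals;
// otherwise the object would look in-flight forever and could never be freed or
// reset without triggering a validation error.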
4180
4181// Note: This function assumes that the global lock is held by the calling
4182// thread.
4183static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4184    bool skip_call = false;
4185    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4186    if (pCB) {
4187        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4188            for (auto event : queryEventsPair.second) {
4189                if (my_data->eventMap[event].needsSignaled) {
4190                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4191                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
4192                                         "Cannot get query results on queryPool 0x%" PRIx64
4193                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4194                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4195                }
4196            }
4197        }
4198    }
4199    return skip_call;
4200}
4201// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4202static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4203    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
4204    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
4205    pCB->in_use.fetch_sub(1);
4206    if (!pCB->in_use.load()) {
4207        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4208    }
4209}
4210
4211static void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4212    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4213    for (auto drawDataElement : pCB->drawData) {
4214        for (auto buffer : drawDataElement.buffers) {
4215            auto buffer_data = my_data->bufferMap.find(buffer);
4216            if (buffer_data != my_data->bufferMap.end()) {
4217                buffer_data->second.in_use.fetch_sub(1);
4218            }
4219        }
4220    }
4221    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4222        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4223            set->in_use.fetch_sub(1);
4224        }
4225    }
4226    for (auto semaphore : pCB->semaphores) {
4227        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
4228        if (semaphoreNode != my_data->semaphoreMap.end()) {
4229            semaphoreNode->second.in_use.fetch_sub(1);
4230        }
4231    }
4232    for (auto event : pCB->events) {
4233        auto eventNode = my_data->eventMap.find(event);
4234        if (eventNode != my_data->eventMap.end()) {
4235            eventNode->second.in_use.fetch_sub(1);
4236        }
4237    }
4238    for (auto queryStatePair : pCB->queryToStateMap) {
4239        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4240    }
4241    for (auto eventStagePair : pCB->eventToStageMap) {
4242        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4243    }
4244}
4245// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
4246//  decrementResources for all priorFences and cmdBuffers associated with fence.
4247static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4248    bool skip_call = false;
4249    std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs;
4250    for (uint32_t i = 0; i < fenceCount; ++i) {
4251        auto fence_data = my_data->fenceMap.find(pFences[i]);
4252        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
4253            return skip_call;
4254        fence_data->second.needsSignaled = false;
4255        if (fence_data->second.in_use.load()) {
4256            fence_pairs.push_back(std::make_pair(fence_data->first, &fence_data->second));
4257            fence_data->second.in_use.fetch_sub(1);
4258        }
4259        decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()),
4260                           fence_data->second.priorFences.data());
4261        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
4262            decrementResources(my_data, cmdBuffer);
4263            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
4264            removeInFlightCmdBuffer(my_data, cmdBuffer);
4265        }
4266        fence_data->second.cmdBuffers.clear();
4267        fence_data->second.priorFences.clear();
4268    }
4269    for (auto fence_pair : fence_pairs) {
4270        for (auto queue : fence_pair.second->queues) {
4271            auto queue_pair = my_data->queueMap.find(queue);
4272            if (queue_pair != my_data->queueMap.end()) {
4273                auto last_fence_data =
4274                    std::find(queue_pair->second.lastFences.begin(), queue_pair->second.lastFences.end(), fence_pair.first);
4275                if (last_fence_data != queue_pair->second.lastFences.end())
4276                    queue_pair->second.lastFences.erase(last_fence_data);
4277            }
4278        }
4279    }
4280    return skip_call;
4281}
4282// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4283static bool decrementResources(layer_data *my_data, VkQueue queue) {
4284    bool skip_call = false;
4285    auto queue_data = my_data->queueMap.find(queue);
4286    if (queue_data != my_data->queueMap.end()) {
4287        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
4288            decrementResources(my_data, cmdBuffer);
4289            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
4290            removeInFlightCmdBuffer(my_data, cmdBuffer);
4291        }
4292        queue_data->second.untrackedCmdBuffers.clear();
4293        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4294                                        queue_data->second.lastFences.data());
4295    }
4296    return skip_call;
4297}
4298
4299// This function merges command buffer tracking between queues when there is a semaphore dependency
4300// between them (see below for details as to how tracking works). When this happens, the prior
4301// fences from the signaling queue are merged into the wait queue as well as any untracked command
4302// buffers.
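// For example, if queue A waits on a semaphore signaled from queue B, B's lastFences are appended
// to A's lastFences and B's untracked command buffers are re-homed onto the submit's fence (or onto
// A's untracked list when no fence is given), so a wait on A retires B's prior work as well.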
4303static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
4304    if (queue == other_queue) {
4305        return;
4306    }
4307    auto queue_data = dev_data->queueMap.find(queue);
4308    auto other_queue_data = dev_data->queueMap.find(other_queue);
4309    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
4310        return;
4311    }
4312    for (auto fenceInner : other_queue_data->second.lastFences) {
4313        queue_data->second.lastFences.push_back(fenceInner);
4314        auto fence_node = dev_data->fenceMap.find(fenceInner);
4315        if (fence_node != dev_data->fenceMap.end()) {
4316            fence_node->second.queues.insert(other_queue_data->first);
4317        }
4318    }
4319    if (fence != VK_NULL_HANDLE) {
4320        auto fence_data = dev_data->fenceMap.find(fence);
4321        if (fence_data == dev_data->fenceMap.end()) {
4322            return;
4323        }
4324        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4325            fence_data->second.cmdBuffers.push_back(cmdbuffer);
4326        }
4327        other_queue_data->second.untrackedCmdBuffers.clear();
4328    } else {
4329        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
4330            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
4331        }
4332        other_queue_data->second.untrackedCmdBuffers.clear();
4333    }
4334    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
4335        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
4336    }
4337}
4338
4339// This is the core function for tracking command buffers. There are two primary ways command
4340// buffers are tracked. When submitted they are stored in the command buffer list associated
4341// with a fence or the untracked command buffer list associated with a queue if no fence is used.
4342// Each queue also stores the last fence that was submitted onto the queue. This allows us to
4343// create a linked list of fences and their associated command buffers so if one fence is
4344// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
4345// waited on (either via a queue, device or fence), we free the cmd buffers for that fence and
4346// recursively call with the prior fences.
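// Example: submit CB1 with fence F1, then CB2 with fence F2 on the same queue. F2's priorFences
// then holds F1, so waiting on F2 retires CB2 and recursively retires F1 and CB1 as well.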
4347static void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4348                                VkFence fence) {
4349    auto queue_data = my_data->queueMap.find(queue);
4350    if (fence != VK_NULL_HANDLE) {
4351        vector<VkFence> prior_fences;
4352        auto fence_data = my_data->fenceMap.find(fence);
4353        if (fence_data == my_data->fenceMap.end()) {
4354            return;
4355        }
4356        fence_data->second.cmdBuffers.clear();
4357        if (queue_data != my_data->queueMap.end()) {
4358            prior_fences = queue_data->second.lastFences;
4359            queue_data->second.lastFences.clear();
4360            queue_data->second.lastFences.push_back(fence);
4361            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
4362                fence_data->second.cmdBuffers.push_back(cmdbuffer);
4363            }
4364            queue_data->second.untrackedCmdBuffers.clear();
4365        }
4366        fence_data->second.priorFences = prior_fences;
4367        fence_data->second.needsSignaled = true;
4368        fence_data->second.queues.insert(queue);
4369        fence_data->second.in_use.fetch_add(1);
4370        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4371            const VkSubmitInfo *submit = &pSubmits[submit_idx];
4372            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4373                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4374                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
4375                }
4376                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
4377            }
4378        }
4379    } else {
4380        if (queue_data != my_data->queueMap.end()) {
4381            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4382                const VkSubmitInfo *submit = &pSubmits[submit_idx];
4383                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4384                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4385                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
4386                    }
4387                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
4388                }
4389            }
4390        }
4391    }
4392}
4393
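// Mark all primary and secondary command buffers in pSubmits as in-flight: insert each into the
// global in-flight set and increment its in_use count.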
4394static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
4395                                       VkFence fence) {
4396    auto queue_data = my_data->queueMap.find(queue);
4397    if (queue_data != my_data->queueMap.end()) {
4398        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4399            const VkSubmitInfo *submit = &pSubmits[submit_idx];
4400            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
4401                // Add cmdBuffers to the global set and increment count
4402                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
4403                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
4404                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
4405                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
4406                    pSubCB->in_use.fetch_add(1);
4407                }
4408                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
4409                pCB->in_use.fetch_add(1);
4410            }
4411        }
4412    }
4413}
4414
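// Flag an error if a command buffer that is still in flight is re-submitted without
// VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set in its begin info.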
4415static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4416    bool skip_call = false;
4417    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4418        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4419        skip_call |=
4420            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4421                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4422                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4423                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4424    }
4425    return skip_call;
4426}
4427
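// Validate that a command buffer is in a submittable state: not a re-submit of a ONE_TIME_SUBMIT
// buffer, not invalidated by destroyed/updated descriptor sets or destroyed framebuffers, and
// properly ended with vkEndCommandBuffer().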
4428static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4429    bool skipCall = false;
4430    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4431    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4432        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4433                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4434                            "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4435                            "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4436                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4437    }
4438    // Validate that cmd buffers have been updated
4439    if (CB_RECORDED != pCB->state) {
4440        if (CB_INVALID == pCB->state) {
4441            // Inform app of reason CB invalid
4442            bool causeReported = false;
4443            if (!pCB->destroyedSets.empty()) {
4444                std::stringstream set_string;
4445                for (auto set : pCB->destroyedSets)
4446                    set_string << " " << set;
4447
4448                skipCall |=
4449                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4450                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4451                            "You are submitting command buffer 0x%" PRIxLEAST64
4452                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
4453                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
4454                causeReported = true;
4455            }
4456            if (!pCB->updatedSets.empty()) {
4457                std::stringstream set_string;
4458                for (auto set : pCB->updatedSets)
4459                    set_string << " " << set;
4460
4461                skipCall |=
4462                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4463                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4464                            "You are submitting command buffer 0x%" PRIxLEAST64
4465                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
4466                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
4467                causeReported = true;
4468            }
4469            if (!pCB->destroyedFramebuffers.empty()) {
4470                std::stringstream fb_string;
4471                for (auto fb : pCB->destroyedFramebuffers)
4472                    fb_string << " " << fb;
4473
4474                skipCall |=
4475                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4476                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4477                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because it had the following "
4478                            "referenced framebuffers destroyed: %s",
4479                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
4480                causeReported = true;
4481            }
4482            // TODO : This is defensive programming to make sure an error is
4483            //  flagged if we hit this INVALID cmd buffer case and none of the
4484            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
4486            if (!causeReported) {
4487                skipCall |= log_msg(
4488                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4489                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4490                    "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
4491                    "should "
4492                    "be improved to report the exact cause.",
4493                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
4494            }
4495        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4496            skipCall |=
4497                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4498                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4499                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
4500                        (uint64_t)(pCB->commandBuffer));
4501        }
4502    }
4503    return skipCall;
4504}
4505
4506static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4507    // Track in-use for resources off of primary and any secondary CBs
4508    bool skipCall = validateAndIncrementResources(dev_data, pCB);
4509    if (!pCB->secondaryCommandBuffers.empty()) {
4510        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4511            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
4512            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4513            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4514                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
                        " but that buffer has subsequently been bound to "
                        "primary cmd buffer 0x%" PRIxLEAST64
                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4523            }
4524        }
4525    }
4526    skipCall |= validateCommandBufferState(dev_data, pCB);
4527    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4528    // on device
4529    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4530    return skipCall;
4531}
4532
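// vkQueueSubmit: verify the fence is unsignaled and not already in use, update fence/queue command
// buffer tracking, validate wait/signal semaphore state for each submit, validate each command
// buffer's state and image layouts, then mark everything in-flight before calling down the chain.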
4533VKAPI_ATTR VkResult VKAPI_CALL
4534QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4535    bool skipCall = false;
4536    GLOBAL_CB_NODE *pCBNode = NULL;
4537    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4538    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4539    std::unique_lock<std::mutex> lock(global_lock);
4540    // First verify that fence is not in use
4541    if (fence != VK_NULL_HANDLE) {
4542        if ((submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
4543            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4544                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4545                                "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
4546        }
4547        if (!dev_data->fenceMap[fence].needsSignaled) {
4548            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4549                                reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
4551                                reinterpret_cast<uint64_t &>(fence));
4552        }
4553    }
4554    // TODO : Review these old print functions and clean up as appropriate
4555    print_mem_list(dev_data);
4556    printCBList(dev_data);
4557    // Update cmdBuffer-related data structs and mark fence in-use
4558    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
4559    // Now verify each individual submit
4560    std::unordered_set<VkQueue> processed_other_queues;
4561    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4562        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4563        vector<VkSemaphore> semaphoreList;
4564        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4565            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
4566            semaphoreList.push_back(semaphore);
4567            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
4568                if (dev_data->semaphoreMap[semaphore].signaled) {
4569                    dev_data->semaphoreMap[semaphore].signaled = false;
4570                } else {
4571                    skipCall |=
4572                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4573                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4574                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
4575                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
4576                }
4577                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
4578                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
4579                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
4580                    processed_other_queues.insert(other_queue);
4581                }
4582            }
4583        }
4584        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4585            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
4586            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
4587                semaphoreList.push_back(semaphore);
4588                if (dev_data->semaphoreMap[semaphore].signaled) {
4589                    skipCall |=
4590                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4591                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4592                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
4593                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4594                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
4595                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
4596                } else {
4597                    dev_data->semaphoreMap[semaphore].signaled = true;
4598                    dev_data->semaphoreMap[semaphore].queue = queue;
4599                }
4600            }
4601        }
4602        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4603            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
4604            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
4605            if (pCBNode) {
4606                pCBNode->semaphores = semaphoreList;
4607                pCBNode->submitCount++; // increment submit count
4608                pCBNode->lastSubmittedFence = fence;
4609                pCBNode->lastSubmittedQueue = queue;
4610                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
4611                // Call submit-time functions to validate/update state
4612                for (auto &function : pCBNode->validate_functions) {
4613                    skipCall |= function();
4614                }
4615                for (auto &function : pCBNode->eventUpdates) {
4616                    skipCall |= function(queue);
4617                }
4618            }
4619        }
4620    }
4621    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
4622    lock.unlock();
4623    if (!skipCall)
4624        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
4625
4626    return result;
4627}
4628
4629VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4630                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4631    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4632    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4633    // TODO : Track allocations and overall size here
4634    std::lock_guard<std::mutex> lock(global_lock);
4635    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
4636    print_mem_list(my_data);
4637    return result;
4638}
4639
4640VKAPI_ATTR void VKAPI_CALL
4641FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4642    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4643
4644    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
4645    // Before freeing a memory object, an application must ensure the memory object is no longer
4646    // in use by the device—for example by command buffers queued for execution. The memory need
4647    // not yet be unbound from all images and buffers, but any further use of those images or
4648    // buffers (on host or device) for anything other than destroying those objects will result in
4649    // undefined behavior.
4650
4651    std::unique_lock<std::mutex> lock(global_lock);
4652    freeMemObjInfo(my_data, device, mem, false);
4653    print_mem_list(my_data);
4654    printCBList(my_data);
4655    lock.unlock();
4656    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
4657}
4658
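// Verify a vkMapMemory request: the range must have non-zero size, the memory object must not
// already be mapped, and offset (plus size, unless VK_WHOLE_SIZE) must stay within allocationSize.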
4659static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skipCall = false;

    if (size == 0) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_element->second.memRange.size != 0) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from offset 0x%" PRIx64
                                    " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                    offset, mem_element->second.allocInfo.allocationSize);
            }
        } else {
            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                    " oversteps total array size 0x%" PRIx64,
                                    offset, size + offset, mem_element->second.allocInfo.allocationSize);
            }
        }
    }
4694    return skipCall;
4695}
4696
4697static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4698    auto mem_element = my_data->memObjMap.find(mem);
4699    if (mem_element != my_data->memObjMap.end()) {
4700        MemRange new_range;
4701        new_range.offset = offset;
4702        new_range.size = size;
4703        mem_element->second.memRange = new_range;
4704    }
4705}
4706
4707static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
4708    bool skipCall = false;
4709    auto mem_element = my_data->memObjMap.find(mem);
4710    if (mem_element != my_data->memObjMap.end()) {
4711        if (!mem_element->second.memRange.size) {
4712            // Valid Usage: memory must currently be mapped
4713            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4714                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4715                               "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
4716        }
4717        mem_element->second.memRange.size = 0;
4718        if (mem_element->second.pData) {
4719            free(mem_element->second.pData);
4720            mem_element->second.pData = 0;
4721        }
4722    }
4723    return skipCall;
4724}
4725
4726static char NoncoherentMemoryFillValue = 0xb;
4727
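// Record a newly-mapped range. HOST_COHERENT memory is handed back untouched; for non-coherent
// memory a double-sized shadow allocation filled with NoncoherentMemoryFillValue is returned
// instead, with the app pointer offset into its interior, so that writes straying outside the
// mapped range disturb the fill pattern and can be detected later.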
4728static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
4729    auto mem_element = dev_data->memObjMap.find(mem);
4730    if (mem_element != dev_data->memObjMap.end()) {
4731        mem_element->second.pDriverData = *ppData;
4732        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
4733        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4734            mem_element->second.pData = 0;
4735        } else {
4736            if (size == VK_WHOLE_SIZE) {
4737                size = mem_element->second.allocInfo.allocationSize;
4738            }
4739            size_t convSize = (size_t)(size);
4740            mem_element->second.pData = malloc(2 * convSize);
4741            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
4742            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
4743        }
4744    }
4745}
4746// Verify that state for fence being waited on is appropriate. That is,
4747//  a fence being waited on should not already be signalled and
4748//  it should have been submitted on a queue or during acquire next image
4749static inline bool verifyWaitFenceState(VkDevice device, VkFence fence, const char *apiCall) {
4750    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4751    bool skipCall = false;
4752    auto pFenceInfo = my_data->fenceMap.find(fence);
4753    if (pFenceInfo != my_data->fenceMap.end()) {
4754        if (!pFenceInfo->second.firstTimeFlag) {
4755            if (!pFenceInfo->second.needsSignaled) {
4756                skipCall |=
4757                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4758                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4759                            "%s specified fence 0x%" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
4760            }
4761            if (pFenceInfo->second.queues.empty() && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
4762                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4763                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4764                                    "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
4765                                    "acquire next image.",
4766                                    apiCall, reinterpret_cast<uint64_t &>(fence));
4767            }
4768        } else {
4769            pFenceInfo->second.firstTimeFlag = false;
4770        }
4771    }
4772    return skipCall;
4773}
4774
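// vkWaitForFences: verify each fence has actually been submitted, then, once the wait succeeds and
// all fences are known complete (waitAll set, or only a single fence), release the command buffers
// and resources those fences were tracking.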
4775VKAPI_ATTR VkResult VKAPI_CALL
4776WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
4777    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4778    bool skip_call = false;
4779    // Verify fence status of submitted fences
4780    std::unique_lock<std::mutex> lock(global_lock);
4781    for (uint32_t i = 0; i < fenceCount; i++) {
4782        skip_call |= verifyWaitFenceState(device, pFences[i], "vkWaitForFences");
4783    }
4784    lock.unlock();
4785    if (skip_call)
4786        return VK_ERROR_VALIDATION_FAILED_EXT;
4787
4788    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
4789
4790    if (result == VK_SUCCESS) {
4791        lock.lock();
4792        // When we know that all fences are complete we can clean/remove their CBs
4793        if (waitAll || fenceCount == 1) {
4794            skip_call |= decrementResources(dev_data, fenceCount, pFences);
4795        }
4796        // NOTE : Alternate case not handled here is when some fences have completed. In
4797        //  this case for app to guarantee which fences completed it will have to call
4798        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
4799        lock.unlock();
4800    }
4801    if (skip_call)
4802        return VK_ERROR_VALIDATION_FAILED_EXT;
4803    return result;
4804}
4805
VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall = verifyWaitFenceState(device, fence, "vkGetFenceStatus");
    lock.unlock();

    if (skipCall)
        return result;

    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    lock.lock();
    if (result == VK_SUCCESS) {
        // Fence is signaled, so clean/remove the command buffers it was tracking
        skipCall |= decrementResources(dev_data, 1, &fence);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}
4828
4829VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
4830                                                            VkQueue *pQueue) {
4831    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4832    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
4833    std::lock_guard<std::mutex> lock(global_lock);
4834
4835    // Add queue to tracking set only if it is new
4836    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
4838        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
4839        pQNode->device = device;
4840    }
4841}
4842
VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    // Hold global_lock while walking the shared tracking maps, as all other callers of
    // decrementResources do
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= decrementResources(dev_data, queue);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
    return result;
}
4852
4853VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
4854    bool skip_call = false;
4855    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4856    std::unique_lock<std::mutex> lock(global_lock);
4857    for (auto queue : dev_data->queues) {
4858        skip_call |= decrementResources(dev_data, queue);
4859    }
4860    dev_data->globalInFlightCmdBuffers.clear();
4861    lock.unlock();
4862    if (skip_call)
4863        return VK_ERROR_VALIDATION_FAILED_EXT;
4864    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
4865    return result;
4866}
4867
4868VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
4869    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4870    bool skipCall = false;
4871    std::unique_lock<std::mutex> lock(global_lock);
4872    auto fence_pair = dev_data->fenceMap.find(fence);
4873    if (fence_pair != dev_data->fenceMap.end()) {
4874        if (fence_pair->second.in_use.load()) {
4875            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4876                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4877                                "Fence 0x%" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
4878        }
4879        dev_data->fenceMap.erase(fence_pair);
4880    }
4881    lock.unlock();
4882
4883    if (!skipCall)
4884        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
4885}
4886
4887VKAPI_ATTR void VKAPI_CALL
4888DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
4889    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4890    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
4891    std::lock_guard<std::mutex> lock(global_lock);
4892    auto item = dev_data->semaphoreMap.find(semaphore);
4893    if (item != dev_data->semaphoreMap.end()) {
4894        if (item->second.in_use.load()) {
4895            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4896                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4897                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
4898        }
4899        dev_data->semaphoreMap.erase(semaphore);
4900    }
4901    // TODO : Clean up any internal data structures using this obj.
4902}
4903
4904VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
4905    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4906    bool skip_call = false;
4907    std::unique_lock<std::mutex> lock(global_lock);
4908    auto event_data = dev_data->eventMap.find(event);
4909    if (event_data != dev_data->eventMap.end()) {
4910        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
4915        }
4916        dev_data->eventMap.erase(event_data);
4917    }
4918    lock.unlock();
4919    if (!skip_call)
4920        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
4921    // TODO : Clean up any internal data structures using this obj.
4922}
4923
4924VKAPI_ATTR void VKAPI_CALL
4925DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
4926    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
4927        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
4928    // TODO : Clean up any internal data structures using this obj.
4929}
4930
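// vkGetQueryPoolResults: for each requested query, flag retrieval errors: queries still in flight
// without a prior event wait, unavailable queries (unless PARTIAL/WAIT flags can make them
// available), and queries for which no data has ever been collected.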
4931VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
4932                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
4933                                                   VkQueryResultFlags flags) {
4934    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4935    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
4936    GLOBAL_CB_NODE *pCB = nullptr;
4937    std::unique_lock<std::mutex> lock(global_lock);
4938    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
4939        pCB = getCBNode(dev_data, cmdBuffer);
4940        for (auto queryStatePair : pCB->queryToStateMap) {
4941            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
4942        }
4943    }
4944    bool skip_call = false;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        if (queryToStateElement == dev_data->queryToStateMap.end()) {
            // Uninitialized: no data has ever been collected for this query
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        } else if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
            // Available and in flight
            for (auto cmdBuffer : queryElement->second) {
                pCB = getCBNode(dev_data, cmdBuffer);
                auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                         (uint64_t)(queryPool), firstQuery + i);
                } else {
                    for (auto event : queryEventElement->second) {
                        dev_data->eventMap[event].needsSignaled = true;
                    }
                }
            }
        } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
            // Unavailable and in flight
            // TODO : Can there be the same query in use by multiple command buffers in flight?
            bool make_available = false;
            for (auto cmdBuffer : queryElement->second) {
                pCB = getCBNode(dev_data, cmdBuffer);
                make_available |= pCB->queryToStateMap[query];
            }
            if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
        } else if (!queryToStateElement->second) {
            // Unavailable and not in flight
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
4998    lock.unlock();
4999    if (skip_call)
5000        return VK_ERROR_VALIDATION_FAILED_EXT;
5001    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5002                                                                flags);
5003}
5004
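// Verify that a buffer exists in bufferMap and is not in use by an in-flight command buffer before
// it is destroyed.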
5005static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5006    bool skip_call = false;
5007    auto buffer_data = my_data->bufferMap.find(buffer);
5008    if (buffer_data == my_data->bufferMap.end()) {
5009        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5010                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5011                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5012    } else {
5013        if (buffer_data->second.in_use.load()) {
5014            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5015                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5016                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5017        }
5018    }
5019    return skip_call;
5020}
5021
5022static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5023                                     VkDebugReportObjectTypeEXT object_type) {
5024    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5025        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5026                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle,
5027                       other_handle);
5028    } else {
5029        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5030                       MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle,
5031                       other_handle);
5032    }
5033}
5034
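// Check a new buffer/image range against existing ranges of the other type on the same memory
// object. Addresses are first masked down to bufferImageGranularity boundaries; two ranges only
// alias (and trigger MEMTRACK_INVALID_ALIASING) when their masked spans overlap. The mask
// arithmetic assumes bufferImageGranularity is a power of two.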
5035static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5036                                  VkDebugReportObjectTypeEXT object_type) {
5037    bool skip_call = false;
5038
5039    for (auto range : ranges) {
5040        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
5041            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5042            continue;
5043        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
5044            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5045            continue;
5046        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5047    }
5048    return skip_call;
5049}
5050
5051static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5052                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
5053    MEMORY_RANGE range;
5054    range.handle = handle;
5055    range.memory = mem;
5056    range.start = memoryOffset;
5057    range.end = memoryOffset + memRequirements.size - 1;
5058    ranges.push_back(range);
5059    return range;
5060}
5061
5062static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
5063    for (uint32_t item = 0; item < ranges.size(); item++) {
5064        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
5065            ranges.erase(ranges.begin() + item);
5066            break;
5067        }
5068    }
5069}
5070
5071VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5072                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
        lock.unlock();
        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
    }
5081    // Clean up memory binding and range information for buffer
5082    const auto &bufferEntry = dev_data->bufferMap.find(buffer);
5083    if (bufferEntry != dev_data->bufferMap.end()) {
5084        const auto &memEntry = dev_data->memObjMap.find(bufferEntry->second.mem);
5085        if (memEntry != dev_data->memObjMap.end()) {
5086            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), bufferEntry->second.mem, memEntry->second.bufferRanges);
5087        }
5088        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5089        dev_data->bufferMap.erase(bufferEntry);
5090    }
5091}
5092
5093VKAPI_ATTR void VKAPI_CALL
5094DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5095    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5096    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5097    std::lock_guard<std::mutex> lock(global_lock);
5098    auto item = dev_data->bufferViewMap.find(bufferView);
5099    if (item != dev_data->bufferViewMap.end()) {
5100        dev_data->bufferViewMap.erase(item);
5101    }
5102}
5103
5104VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5110
5111    std::lock_guard<std::mutex> lock(global_lock);
5112    const auto &imageEntry = dev_data->imageMap.find(image);
5113    if (imageEntry != dev_data->imageMap.end()) {
5114        // Clean up memory mapping, bindings and range references for image
5115        auto memEntry = dev_data->memObjMap.find(imageEntry->second.mem);
5116        if (memEntry != dev_data->memObjMap.end()) {
5117            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.mem, memEntry->second.imageRanges);
5118            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5119            memEntry->second.image = VK_NULL_HANDLE;
5120        }
5121        // Remove image from imageMap
5122        dev_data->imageMap.erase(imageEntry);
5123    }
5124    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5125    if (subEntry != dev_data->imageSubresourceMap.end()) {
5126        for (const auto& pair : subEntry->second) {
5127            dev_data->imageLayoutMap.erase(pair);
5128        }
5129        dev_data->imageSubresourceMap.erase(subEntry);
5130    }
5131}
5132
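// vkBindBufferMemory: record the buffer-to-memory binding, add the bound range to the memory
// object's buffer ranges and check it for aliasing against image ranges, then validate memoryOffset
// against VkMemoryRequirements::alignment and the relevant min*OffsetAlignment device limits.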
5133VKAPI_ATTR VkResult VKAPI_CALL
5134BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5135    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5136    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5137    std::unique_lock<std::mutex> lock(global_lock);
5138    // Track objects tied to memory
5139    uint64_t buffer_handle = (uint64_t)(buffer);
5140    bool skipCall =
5141        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5142    auto buffer_node = dev_data->bufferMap.find(buffer);
5143    if (buffer_node != dev_data->bufferMap.end()) {
5144        buffer_node->second.mem = mem;
5145        VkMemoryRequirements memRequirements;
5146        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5147
5148        // Track and validate bound memory range information
5149        const auto &memEntry = dev_data->memObjMap.find(mem);
5150        if (memEntry != dev_data->memObjMap.end()) {
5151            const MEMORY_RANGE range =
5152                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, memEntry->second.bufferRanges);
5153            skipCall |=
5154                validate_memory_range(dev_data, memEntry->second.imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5155        }
5156
5157        // Validate memory requirements alignment
5158        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5159            skipCall |=
5160                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5161                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5162                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5163                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5164                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5165                        memoryOffset, memRequirements.alignment);
5166        }
5167        // Validate device limits alignments
5168        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage;
5169        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
5170            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
5171                skipCall |=
5172                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5173                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5174                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5175                            "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64,
5176                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
5177            }
5178        }
5179        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
5180            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
5181                0) {
5182                skipCall |=
5183                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5184                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
5185                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5186                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
5187                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
5188            }
5189        }
5190        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
5191            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
5192                0) {
5193                skipCall |=
5194                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5195                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
5196                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5197                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
5198                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
5199            }
5200        }
5201    }
5202    print_mem_list(dev_data);
5203    lock.unlock();
5204    if (!skipCall) {
5205        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5206    }
5207    return result;
5208}

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
    // TODO : Clean up any internal data structures using this obj.
}
// Verify cmdBuffer in given cb_node is not in the global in-flight set, and return skip_call result
//  If this is a secondary command buffer, check whether its primary is also in-flight; if so, that is an error
//  If the primary is not in-flight, then remove the secondary from the global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // Primary CB or secondary where primary is also in-flight is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
        } else { // Secondary CB w/o primary in-flight, remove from in-flight
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
        }
    }
    return skip_call;
}
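// Illustrative only: a hedged sketch of the scenario this check guards against
// (handles hypothetical):
//
//   vkQueueSubmit(queue, 1, &submitInfo, fence);   // primaryCB is now in-flight
//   vkResetCommandBuffer(primaryCB, 0);            // flagged: reset of an in-use CB
//
// A secondary CB recorded into primaryCB is only released from the global in-flight
// set once primaryCB itself is no longer in-flight.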
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) {
    bool skip_call = false;
    auto pool_data = dev_data->commandPoolMap.find(commandPool);
    if (pool_data != dev_data->commandPoolMap.end()) {
        for (auto cmd_buffer : pool_data->second.commandBuffers) {
            if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
                skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
            }
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap;
        // only dereference the iterator once we know the CB is actually tracked
        if (cb_pair != dev_data->commandBufferMap.end()) {
            skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free");
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb_pair).second->commandBuffer);
            delete (*cb_pair).second;
            dev_data->commandBufferMap.erase(cb_pair);
        }

        // Remove commandBuffer reference from commandPoolMap
        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
    }
    printCBList(dev_data);
    lock.unlock();

    if (!skip_call)
        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}
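// Illustrative only: freeing a command buffer that is still executing is caught by
// the in-flight check above. A hedged sketch (handles hypothetical):
//
//   vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);
//   vkFreeCommandBuffers(device, pool, 1, &cb);    // flagged: "free" of an in-use CB
//   // Correct: wait first, e.g. vkQueueWaitIdle(queue), then free.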

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
    }
    return result;
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL
DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Verify that command buffers in pool are complete (not in-flight)
    bool skipCall = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with");
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap
    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
            clear_cmd_buf_and_mem_references(dev_data, *poolCb);
            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
            delete (*del_cb).second;                  // delete CB info structure
            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
                poolCb); // Remove CB reference from commandPoolMap's list
        }
    }
    dev_data->commandPoolMap.erase(commandPool);

    lock.unlock();

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with"))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
            resetCB(dev_data, (*it));
            ++it;
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto fence_item = dev_data->fenceMap.find(pFences[i]);
        if (fence_item != dev_data->fenceMap.end()) {
            fence_item->second.needsSignaled = true;
            fence_item->second.queues.clear();
            if (fence_item->second.in_use.load()) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence 0x%" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
            }
        }
    }
    lock.unlock();
    if (!skipCall)
        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
    return result;
}
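// Illustrative only: the in-use check above fires for sequences like this hedged
// sketch (handles hypothetical):
//
//   vkQueueSubmit(queue, 1, &submitInfo, fence);   // fence now has pending work
//   vkResetFences(device, 1, &fence);              // flagged: fence is in use
//   // Correct: vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX) first.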

VKAPI_ATTR void VKAPI_CALL
DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
    if (fbNode != dev_data->frameBufferMap.end()) {
        for (auto cb : fbNode->second.referencingCmdBuffers) {
            auto cbNode = dev_data->commandBufferMap.find(cb);
            if (cbNode != dev_data->commandBufferMap.end()) {
                // Set CB as invalid and record destroyed framebuffer
                cbNode->second->state = CB_INVALID;
                cbNode->second->destroyedFramebuffers.insert(framebuffer);
            }
        }
        delete [] fbNode->second.createInfo.pAttachments;
        dev_data->frameBufferMap.erase(fbNode);
    }
    lock.unlock();
    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
}
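// Illustrative only: why recorded command buffers are invalidated above. A hedged
// sketch (handles hypothetical):
//
//   // cb was recorded with a vkCmdBeginRenderPass that references fb
//   vkDestroyFramebuffer(device, fb, nullptr);
//   // cb is now tracked as CB_INVALID; submitting it without re-recording should
//   // be reported, since it references a destroyed framebuffer.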

VKAPI_ATTR void VKAPI_CALL
DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    dev_data->renderPassMap.erase(renderPass);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap.insert(std::make_pair(*pBuffer, BUFFER_NODE(pCreateInfo)));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap.insert(std::make_pair(*pImage, IMAGE_NODE(pCreateInfo)));
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects global_lock to be held by caller */

    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}
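// Worked example of the resolution above (values hypothetical): for an image created
// with mipLevels = 10 and arrayLayers = 6, a range of
//   { baseMipLevel = 3, levelCount = VK_REMAINING_MIP_LEVELS,
//     baseArrayLayer = 2, layerCount = VK_REMAINING_ARRAY_LAYERS }
// resolves to levelCount = 10 - 3 = 7 and layerCount = 6 - 2 = 4.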

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects global_lock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node_it = dev_data->imageMap.find(image);
    if (image_node_it != dev_data->imageMap.end()) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    bool skipCall = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    {
        // Validate that img has correct usage flags set
        std::lock_guard<std::mutex> lock(global_lock);
        skipCall |= validate_image_usage_flags(dev_data, pCreateInfo->image,
                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
    }

    if (!skipCall) {
        result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
    }

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
        dev_data->imageViewMap[*pView] = localCI;
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        auto &fence_node = dev_data->fenceMap[*pFence];
        fence_node.createInfo = *pCreateInfo;
        fence_node.needsSignaled = true;
        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
            fence_node.firstTimeFlag = true;
            fence_node.needsSignaled = false;
        }
        fence_node.in_use.store(0);
    }
    return result;
}

// TODO handle pipeline caches
VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
    return result;
}
// Utility function to set collective state for pipeline
void set_pipeline_state(PIPELINE_NODE *pPipe) {
    // If any attachment used by this pipeline has blendEnable set and uses a constant
    // blend factor, flag the pipeline as requiring blend constants
    if (pPipe->graphicsPipelineCI.pColorBlendState) {
        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
                    pPipe->blendConstantsEnabled = true;
                }
            }
        }
    }
}
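// Illustrative only: the flag set above matters at draw time. A hedged sketch
// (handles hypothetical):
//
//   // Pipeline uses srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR, so
//   // set_pipeline_state() marks blendConstantsEnabled = true. If the pipeline
//   // also lists VK_DYNAMIC_STATE_BLEND_CONSTANTS, the app must supply values:
//   float blendConstants[4] = {1.0f, 1.0f, 1.0f, 1.0f};
//   vkCmdSetBlendConstants(cb, blendConstants);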

VKAPI_ATTR VkResult VKAPI_CALL
CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                        VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    // TODO What to do with pipelineCache?
    // The order of operations here is a little convoluted but gets the job done
    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
    //  2. Create state is then validated (which uses flags set up during shadowing)
    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
    bool skipCall = false;
    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);

    for (i = 0; i < count; i++) {
        pPipeNode[i] = new PIPELINE_NODE;
        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);

        auto renderpass_it = dev_data->renderPassMap.find(pCreateInfos[i].renderPass);
        if (renderpass_it != dev_data->renderPassMap.end()) {
            pPipeNode[i]->renderPass = renderpass_it->second;
        }

        auto pipeline_layout_it = dev_data->pipelineLayoutMap.find(pCreateInfos[i].layout);
        if (pipeline_layout_it != dev_data->pipelineLayoutMap.end()) {
            pPipeNode[i]->pipelineLayout = &pipeline_layout_it->second;
        }

        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (!skipCall) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                       VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    bool skipCall = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);

        auto pipeline_layout_it = dev_data->pipelineLayoutMap.find(pCreateInfos[i].layout);
        if (pipeline_layout_it != dev_data->pipelineLayoutMap.end()) {
            pPipeNode[i]->pipelineLayout = &pipeline_layout_it->second;
        }
        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (!skipCall) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] =
            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skipCall = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
                                                              "exceeds this device's maxPushConstantsSize of %u.",
                        caller_name, index, offset, size, maxPushConstantsSize);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                      "exceeds this device's maxPushConstantsSize of %u.",
                                caller_name, offset, size, maxPushConstantsSize);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        caller_name, index, size);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        caller_name, size);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
                                                                      "offset %u. Offset must be a multiple of 4.",
                                caller_name, index, offset);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                                      "offset %u. Offset must be a multiple of 4.",
                                caller_name, offset);
        } else {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skipCall;
}
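// Worked example (values hypothetical): with maxPushConstantsSize = 128, a range of
// offset = 120, size = 16 is rejected because size > maxPushConstantsSize - offset
// (16 > 8); offset = 120, size = 8 passes the limit check. A size of 6 or an offset
// of 2 would fail independently, since both must be multiples of 4 and size non-zero.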

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Push Constant Range checks
    uint32_t i = 0;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skipCall |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                              pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
        }
    }
    // Each range has been validated individually. Now check for overlap between ranges.
    if (!skipCall) {
        uint32_t i, j;
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
                                i, minA, maxA, j, minB, maxB);
                }
            }
        }
    }
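    // Worked example of the overlap test above (values hypothetical): ranges
    // 0:[0, 16) and 1:[8, 24) overlap because minA (0) <= minB (8) and
    // maxA (16) > minB (8); ranges [0, 16) and [16, 32) do not overlap, since
    // the intervals are half-open.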

    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
        plNode.setLayouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
            auto set_layout_it = dev_data->descriptorSetLayoutMap.find(pCreateInfo->pSetLayouts[i]);
            if (set_layout_it != dev_data->descriptorSetLayoutMap.end()) {
                plNode.setLayouts[i] = set_layout_it->second;
            } else {
                plNode.setLayouts[i] = nullptr;
            }
        }
        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                     VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Log the creation, then track the new pool in descriptorPoolMap
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO : Need to do anything if pool create fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    // Verify that requested descriptorSets are available in pool
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
    if (!pPoolNode) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
                            "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
                            (uint64_t)pAllocateInfo->descriptorPool);
    } else { // Make sure pool has all the available descriptors before calling down chain
        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
                                                             pAllocateInfo->pSetLayouts);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
        if (pPoolNode) {
            if (pAllocateInfo->descriptorSetCount == 0) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
                        "AllocateDescriptorSets called with 0 count");
            }
            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set 0x%" PRIxLEAST64,
                        (uint64_t)pDescriptorSets[i]);
                auto layout_pair = dev_data->descriptorSetLayoutMap.find(pAllocateInfo->pSetLayouts[i]);
                if (layout_pair == dev_data->descriptorSetLayoutMap.end()) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
                                __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS", "Unable to find set layout node for layout 0x%" PRIxLEAST64
                                                                          " specified in vkAllocateDescriptorSets() call",
                                (uint64_t)pAllocateInfo->pSetLayouts[i])) {
                        lock.unlock();
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                    // Layout is unknown; skip tracking this set rather than dereferencing an invalid iterator below
                    continue;
                }
                // Create new DescriptorSet instance and add to the pool's unordered_set of DescriptorSets
                cvdescriptorset::DescriptorSet *pNewNode = new cvdescriptorset::DescriptorSet(
                    pDescriptorSets[i], layout_pair->second, dev_data->report_data, &dev_data->bufferMap, &dev_data->memObjMap,
                    &dev_data->bufferViewMap, &dev_data->samplerMap, &dev_data->imageViewMap, &dev_data->imageMap,
                    &dev_data->device_extensions.imageToSwapchainMap, &dev_data->device_extensions.swapchainMap);
                if (NULL == pNewNode) {
                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                DRAWSTATE_OUT_OF_MEMORY, "DS", "Out of memory while attempting to allocate "
                                                               "cvdescriptorset::DescriptorSet in vkAllocateDescriptorSets()")) {
                        lock.unlock();
                        return VK_ERROR_VALIDATION_FAILED_EXT;
                    }
                } else {
                    // Insert set into this pool
                    pPoolNode->sets.insert(pNewNode);
                    pNewNode->in_use.store(0);
                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
                }
            }
        }
        lock.unlock();
    }
    return result;
}
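// Illustrative only: the availability check above is what rejects over-allocation.
// A hedged sketch (handles and counts hypothetical):
//
//   // Pool was created with maxSets = 1
//   VkDescriptorSetAllocateInfo info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO};
//   info.descriptorPool = pool;
//   info.descriptorSetCount = 2;          // exceeds what the pool has available
//   info.pSetLayouts = layouts;
//   vkAllocateDescriptorSets(device, &info, sets);   // flagged before dispatch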

VKAPI_ATTR VkResult VKAPI_CALL
FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < count; ++i)
        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        // Only update book-keeping when the pool node is actually tracked
        if (pPoolNode) {
            // Update available descriptor sets in pool
            pPoolNode->availableSets += count;

            // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
            for (uint32_t i = 0; i < count; ++i) {
                cvdescriptorset::DescriptorSet *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
                uint32_t typeIndex = 0, poolSizeCount = 0;
                for (uint32_t j = 0; j < pSet->GetBindingCount(); ++j) {
                    typeIndex = static_cast<uint32_t>(pSet->GetTypeFromIndex(j));
                    poolSizeCount = pSet->GetDescriptorCountFromIndex(j);
                    pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
                }
                freeDescriptorSet(dev_data, pSet);
                pPoolNode->sets.erase(pSet);
            }
        }
        lock.unlock();
    }
    // TODO : Any other clean-up or book-keeping to do here?
    return result;
}
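// Illustrative only: a hedged sketch of the flag requirement checked above
// (handles hypothetical):
//
//   // Pool created WITHOUT VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT
//   vkFreeDescriptorSets(device, pool, 1, &set);     // flagged: can't free from a
//                                                    // non-FREE pool; instead use
//   vkResetDescriptorPool(device, pool, 0);          // to recycle all sets at once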

VKAPI_ATTR void VKAPI_CALL
UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // dsUpdate will return true only if a bailout error occurs, so we only call down the chain when update returns false
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
    lock.unlock();
    if (!rtn) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
        if (cp_it != dev_data->commandPoolMap.end()) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        printCBList(dev_data);
        lock.unlock();
    }
    return result;
}
6103
6104VKAPI_ATTR VkResult VKAPI_CALL
6105BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6106    bool skipCall = false;
6107    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6108    std::unique_lock<std::mutex> lock(global_lock);
6109    // Validate command buffer level
6110    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6111    if (pCB) {
6112        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6113        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6114            skipCall |=
6115                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6116                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6117                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6118                        "You must check CB fence before this call.",
6119                        commandBuffer);
6120        }
6121        clear_cmd_buf_and_mem_references(dev_data, pCB);
6122        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6123            // Secondary Command Buffer
6124            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6125            if (!pInfo) {
6126                skipCall |=
6127                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6128                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6129                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6130                            reinterpret_cast<void *>(commandBuffer));
6131            } else {
6132                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6133                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6134                        skipCall |= log_msg(
6135                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6136                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6137                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6138                            reinterpret_cast<void *>(commandBuffer));
6139                    }
6140                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6141                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6142                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6143                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6144                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
6145                                                  "valid framebuffer parameter is specified.",
6146                                            reinterpret_cast<void *>(commandBuffer));
6147                    } else {
6148                        string errorString = "";
6149                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
6150                        if (fbNode != dev_data->frameBufferMap.end()) {
6151                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
6152                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6153                                // renderPass that framebuffer was created with must be compatible with local renderPass
6154                                skipCall |=
6155                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6156                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6157                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6158                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6159                                                  "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6160                                                  "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6161                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6162                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6163                            }
6164                            // Connect this framebuffer to this cmdBuffer
6165                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
6166                        }
6167                    }
6168                }
6169                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6170                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6171                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6172                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6173                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6174                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6175                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6176                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
6177                                        "support precise occlusion queries.",
6178                                        reinterpret_cast<void *>(commandBuffer));
6179                }
6180            }
6181            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6182                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
6183                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
6184                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
6185                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6186                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6187                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6188                                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
6189                                            "that is less than the number of subpasses (%d).",
6190                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
6191                    }
6192                }
6193            }
6194        }
6195        if (CB_RECORDING == pCB->state) {
6196            skipCall |=
6197                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6198                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6199                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6200                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6201                        (uint64_t)commandBuffer);
6202        } else if (CB_RECORDED == pCB->state || (CB_INVALID == pCB->state && !pCB->cmds.empty() && CMD_END == pCB->cmds.back().type)) {
6203            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6204            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6205                skipCall |=
6206                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6207                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6208                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6209                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6210                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6211                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6212            }
6213            resetCB(dev_data, commandBuffer);
6214        }
6215        // Set updated state here in case implicit reset occurs above
6216        pCB->state = CB_RECORDING;
6217        pCB->beginInfo = *pBeginInfo;
6218        if (pCB->beginInfo.pInheritanceInfo) {
6219            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6220            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6221            // If this is a secondary command buffer inheriting render pass state, update the inherited items.
6222            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6223                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6224                auto inherited_render_pass_it = dev_data->renderPassMap.find(pCB->beginInfo.pInheritanceInfo->renderPass);
6225                pCB->activeRenderPass = inherited_render_pass_it != dev_data->renderPassMap.end() ? inherited_render_pass_it->second : nullptr;
6226                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
6227                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
6228            }
6229        }
6230    } else {
6231        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6232                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6233                            "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
6234    }
6235    lock.unlock();
6236    if (skipCall) {
6237        return VK_ERROR_VALIDATION_FAILED_EXT;
6238    }
6239    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6240
6241    return result;
6242}
6243
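// Illustrative sketch (editor addition, not used by the layer): how an application
// would begin a secondary command buffer so the checks above pass. The helper name
// and the rp/fb handles are assumptions. With RENDER_PASS_CONTINUE_BIT set,
// renderPass must be valid and compatible with the framebuffer's render pass,
// subpass must be less than the render pass's subpassCount, and supplying the
// framebuffer (though optional) avoids the performance warning.
static VkResult ExampleBeginSecondaryCommandBuffer(VkCommandBuffer cb, VkRenderPass rp, VkFramebuffer fb) {
    VkCommandBufferInheritanceInfo inheritance = {};
    inheritance.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    inheritance.renderPass = rp;                 // required when RENDER_PASS_CONTINUE_BIT is set
    inheritance.subpass = 0;                     // must be < subpassCount of rp
    inheritance.framebuffer = fb;                // optional, but better performance when known
    inheritance.occlusionQueryEnable = VK_FALSE; // queryFlags must then omit VK_QUERY_CONTROL_PRECISE_BIT

    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    begin_info.pInheritanceInfo = &inheritance;
    return vkBeginCommandBuffer(cb, &begin_info);
}
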
6244VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6245    bool skipCall = false;
6246    VkResult result = VK_SUCCESS;
6247    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6248    std::unique_lock<std::mutex> lock(global_lock);
6249    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6250    if (pCB) {
6251        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6252            // This needs spec clarification to update valid usage, see comments in PR:
6253            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6254            skipCall |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6255        }
6256        skipCall |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6257        for (auto query : pCB->activeQueries) {
6258            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6259                                DRAWSTATE_INVALID_QUERY, "DS",
6260                                "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
6261                                (uint64_t)(query.pool), query.index);
6262        }
6263    }
6264    if (!skipCall) {
6265        lock.unlock();
6266        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6267        lock.lock();
6268        if (VK_SUCCESS == result) {
6269            pCB->state = CB_RECORDED;
6270            // Reset CB status flags
6271            pCB->status = 0;
6272            printCB(dev_data, commandBuffer);
6273        }
6274    } else {
6275        result = VK_ERROR_VALIDATION_FAILED_EXT;
6276    }
6277    lock.unlock();
6278    return result;
6279}
6280
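// Illustrative sketch (editor addition): the activeQueries check above requires
// every vkCmdBeginQuery to be matched by vkCmdEndQuery on the same queryPool and
// index before vkEndCommandBuffer. The handles are assumptions.
static void ExampleRecordBoundedQuery(VkCommandBuffer cb, VkQueryPool pool) {
    vkCmdBeginQuery(cb, pool, 0 /*query*/, 0 /*flags*/);
    // ... record work whose results the query collects ...
    vkCmdEndQuery(cb, pool, 0); // ending here avoids the DRAWSTATE_INVALID_QUERY error
}
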
6281VKAPI_ATTR VkResult VKAPI_CALL
6282ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6283    bool skip_call = false;
6284    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6285    std::unique_lock<std::mutex> lock(global_lock);
6286    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6287    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6288    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6289        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6290                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6291                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6292                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6293                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6294    }
6295    skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset");
6296    lock.unlock();
6297    if (skip_call)
6298        return VK_ERROR_VALIDATION_FAILED_EXT;
6299    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6300    if (VK_SUCCESS == result) {
6301        lock.lock();
6302        resetCB(dev_data, commandBuffer);
6303        lock.unlock();
6304    }
6305    return result;
6306}
6307
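// Illustrative sketch (editor addition): explicit resets -- and the implicit reset
// performed by vkBeginCommandBuffer on a RECORDED buffer -- are only valid when the
// parent pool was created with the RESET flag checked above.
static VkResult ExampleCreateResettablePool(VkDevice device, uint32_t queue_family, VkCommandPool *pool) {
    VkCommandPoolCreateInfo pool_info = {};
    pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; // permits per-buffer resets
    pool_info.queueFamilyIndex = queue_family;
    return vkCreateCommandPool(device, &pool_info, nullptr, pool);
}
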
6308VKAPI_ATTR void VKAPI_CALL
6309CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6310    bool skipCall = false;
6311    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6312    std::unique_lock<std::mutex> lock(global_lock);
6313    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6314    if (pCB) {
6315        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6316        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6317            skipCall |=
6318                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6319                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6320                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6321                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6322        }
6323
6324        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6325        if (pPN) {
6326            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6327            set_cb_pso_status(pCB, pPN);
6328            set_pipeline_state(pPN);
6329        } else {
6330            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6331                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6332                                "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6333        }
6334    }
6335    lock.unlock();
6336    if (!skipCall)
6337        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6338}
6339
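// Illustrative sketch (editor addition): compute pipelines may only be bound
// outside an active render pass, per the DRAWSTATE_INVALID_RENDERPASS_CMD check
// above. A render pass is assumed to be active on entry.
static void ExampleBindComputeOutsideRenderPass(VkCommandBuffer cb, VkPipeline compute_pipeline) {
    vkCmdEndRenderPass(cb); // end the active render pass before any compute work
    vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);
    vkCmdDispatch(cb, 1, 1, 1);
}
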
6340VKAPI_ATTR void VKAPI_CALL
6341CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6342    bool skipCall = false;
6343    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6344    std::unique_lock<std::mutex> lock(global_lock);
6345    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6346    if (pCB) {
6347        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6348        pCB->status |= CBSTATUS_VIEWPORT_SET;
6349        pCB->viewports.resize(viewportCount);
6350        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6351    }
6352    lock.unlock();
6353    if (!skipCall)
6354        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6355}
6356
6357VKAPI_ATTR void VKAPI_CALL
6358CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6359    bool skipCall = false;
6360    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6361    std::unique_lock<std::mutex> lock(global_lock);
6362    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6363    if (pCB) {
6364        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6365        pCB->status |= CBSTATUS_SCISSOR_SET;
6366        pCB->scissors.resize(scissorCount);
6367        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6368    }
6369    lock.unlock();
6370    if (!skipCall)
6371        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6372}
6373
6374VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6375    bool skip_call = false;
6376    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6377    std::unique_lock<std::mutex> lock(global_lock);
6378    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6379    if (pCB) {
6380        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6381        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6382
6383        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6384        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6385            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6386                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6387                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
6388                                 "flag. This is undefined behavior; the new line width may be ignored.");
6389        } else {
6390            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6391        }
6392    }
6393    lock.unlock();
6394    if (!skip_call)
6395        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6396}
6397
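// Illustrative sketch (editor addition): vkCmdSetLineWidth only takes effect when
// the bound pipeline declared VK_DYNAMIC_STATE_LINE_WIDTH, as warned above. This
// is the dynamic-state block an application would chain into
// VkGraphicsPipelineCreateInfo::pDynamicState.
static VkPipelineDynamicStateCreateInfo ExampleLineWidthDynamicState() {
    static const VkDynamicState dynamic_states[] = {VK_DYNAMIC_STATE_LINE_WIDTH};
    VkPipelineDynamicStateCreateInfo dynamic_info = {};
    dynamic_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
    dynamic_info.dynamicStateCount = 1;
    dynamic_info.pDynamicStates = dynamic_states; // static storage keeps the pointer valid
    return dynamic_info;
}
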
6398VKAPI_ATTR void VKAPI_CALL
6399CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6400    bool skipCall = false;
6401    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6402    std::unique_lock<std::mutex> lock(global_lock);
6403    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6404    if (pCB) {
6405        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6406        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6407    }
6408    lock.unlock();
6409    if (!skipCall)
6410        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6411                                                         depthBiasSlopeFactor);
6412}
6413
6414VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6415    bool skipCall = false;
6416    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6417    std::unique_lock<std::mutex> lock(global_lock);
6418    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6419    if (pCB) {
6420        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6421        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6422    }
6423    lock.unlock();
6424    if (!skipCall)
6425        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6426}
6427
6428VKAPI_ATTR void VKAPI_CALL
6429CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6430    bool skipCall = false;
6431    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6432    std::unique_lock<std::mutex> lock(global_lock);
6433    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6434    if (pCB) {
6435        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6436        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6437    }
6438    lock.unlock();
6439    if (!skipCall)
6440        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6441}
6442
6443VKAPI_ATTR void VKAPI_CALL
6444CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6445    bool skipCall = false;
6446    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6447    std::unique_lock<std::mutex> lock(global_lock);
6448    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6449    if (pCB) {
6450        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6451        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6452    }
6453    lock.unlock();
6454    if (!skipCall)
6455        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6456}
6457
6458VKAPI_ATTR void VKAPI_CALL
6459CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6460    bool skipCall = false;
6461    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6462    std::unique_lock<std::mutex> lock(global_lock);
6463    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6464    if (pCB) {
6465        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6466        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6467    }
6468    lock.unlock();
6469    if (!skipCall)
6470        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6471}
6472
6473VKAPI_ATTR void VKAPI_CALL
6474CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6475    bool skipCall = false;
6476    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6477    std::unique_lock<std::mutex> lock(global_lock);
6478    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6479    if (pCB) {
6480        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6481        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6482    }
6483    lock.unlock();
6484    if (!skipCall)
6485        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6486}
6487
6488VKAPI_ATTR void VKAPI_CALL
6489CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6490                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6491                      const uint32_t *pDynamicOffsets) {
6492    bool skipCall = false;
6493    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6494    std::unique_lock<std::mutex> lock(global_lock);
6495    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6496    if (pCB) {
6497        if (pCB->state == CB_RECORDING) {
6498            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6499            uint32_t totalDynamicDescriptors = 0;
6500            string errorString = "";
6501            uint32_t lastSetIndex = firstSet + setCount - 1;
6502            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6503                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6504                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6505            }
6506            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6507            for (uint32_t i = 0; i < setCount; i++) {
6508                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6509                if (pSet) {
6510                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6511                    pSet->BindCommandBuffer(pCB);
6512                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6513                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6514                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6515                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6516                                        DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
6517                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6518                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6519                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6520                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6521                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6522                                            "DS 0x%" PRIxLEAST64
6523                                            " bound but it was never updated. You may want to either update it or not bind it.",
6524                                            (uint64_t)pDescriptorSets[i]);
6525                    }
6526                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6527                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6528                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6529                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6530                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6531                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6532                                            "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
6533                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
6534                    }
6535
6536                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
6537
6538                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
6539
6540                    if (setDynamicDescriptorCount) {
6541                        // First make sure we won't overstep bounds of pDynamicOffsets array
6542                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
6543                            skipCall |=
6544                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6545                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6546                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6547                                        "descriptorSet #%u (0x%" PRIxLEAST64
6548                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6549                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6550                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
6551                                        (dynamicOffsetCount - totalDynamicDescriptors));
6552                        } else { // Validate and store dynamic offsets with the set
6553                            // Validate Dynamic Offset Minimums
6554                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
6555                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
6556                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6557                                    if (vk_safe_modulo(
6558                                            pDynamicOffsets[cur_dyn_offset],
6559                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6560                                        skipCall |= log_msg(
6561                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6562                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6563                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6564                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6565                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
6566                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6567                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6568                                    }
6569                                    cur_dyn_offset++;
6570                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6571                                    if (vk_safe_modulo(
6572                                            pDynamicOffsets[cur_dyn_offset],
6573                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6574                                        skipCall |= log_msg(
6575                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6576                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6577                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6578                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6579                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
6580                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6581                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6582                                    }
6583                                    cur_dyn_offset++;
6584                                }
6585                            }
6586
6587                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
6588                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
6589                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
6590                            // Keep running total of dynamic descriptor count to verify at the end
6591                            totalDynamicDescriptors += setDynamicDescriptorCount;
6592
6593                        }
6594                    }
6595                } else {
6596                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6597                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6598                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
6599                                        (uint64_t)pDescriptorSets[i]);
6600                }
6601                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6602                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6603                if (firstSet > 0) { // Check set #s below the first bound set
6604                    for (uint32_t i = 0; i < firstSet; ++i) {
6605                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
6606                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
6607                                                             layout, i, errorString)) {
6608                            skipCall |= log_msg(
6609                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6610                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6611                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
6612                                "DescriptorSet 0x%" PRIxLEAST64
6613                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6614                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
6615                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
6616                        }
6617                    }
6618                }
6619                // Check if newly last bound set invalidates any remaining bound sets
6620                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
6621                    if (oldFinalBoundSet &&
6622                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
6623                        auto old_set = oldFinalBoundSet->GetSet();
6624                        skipCall |=
6625                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6626                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
6627                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
6628                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
6629                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
6630                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6631                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
6632                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
6633                                    lastSetIndex + 1, (uint64_t)layout);
6634                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6635                    }
6636                }
6637            }
6638            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6639            if (totalDynamicDescriptors != dynamicOffsetCount) {
6640                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6641                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6642                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6643                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
6644                                    "is %u. It should exactly match the number of dynamic descriptors.",
6645                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
6646            }
6647        } else {
6648            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
6649        }
6650    }
6651    lock.unlock();
6652    if (!skipCall)
6653        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6654                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6655}
6656
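// Illustrative sketch (editor addition): the dynamic-offset rules validated above --
// exactly one offset per dynamic descriptor across all bound sets, each a multiple
// of the matching device alignment limit. The set is assumed to hold a single
// UNIFORM_BUFFER_DYNAMIC descriptor.
static void ExampleBindWithDynamicOffset(VkCommandBuffer cb, VkPipelineLayout layout, VkDescriptorSet set) {
    uint32_t offset = 0; // 0 is a multiple of any minUniformBufferOffsetAlignment
    vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0 /*firstSet*/, 1, &set,
                            1 /*dynamicOffsetCount*/, &offset);
}
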
6657VKAPI_ATTR void VKAPI_CALL
6658CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
6659    bool skipCall = false;
6660    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6661    // TODO : Somewhere need to verify that IBs have correct usage state flagged
6662    std::unique_lock<std::mutex> lock(global_lock);
6663    VkDeviceMemory mem;
6664    skipCall =
6665        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6666    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6667    if (cb_data != dev_data->commandBufferMap.end()) {
6668        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
6669        cb_data->second->validate_functions.push_back(function);
6670        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6671        VkDeviceSize offset_align = 0;
6672        switch (indexType) {
6673        case VK_INDEX_TYPE_UINT16:
6674            offset_align = 2;
6675            break;
6676        case VK_INDEX_TYPE_UINT32:
6677            offset_align = 4;
6678            break;
6679        default:
6680            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
6681            break;
6682        }
6683        if (!offset_align || (offset % offset_align)) {
6684            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6685                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
6686                                "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not aligned to the size of index type %s.",
6687                                offset, string_VkIndexType(indexType));
6688        }
6689        cb_data->second->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6690    }
6691    lock.unlock();
6692    if (!skipCall)
6693        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6694}
6695
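// Illustrative sketch (editor addition) of the offset_align rule above: the bind
// offset must be a multiple of the index size (2 bytes for UINT16, 4 bytes for
// UINT32). UINT32 is assumed for any other enum value.
static VkDeviceSize ExampleAlignIndexOffset(VkDeviceSize offset, VkIndexType type) {
    const VkDeviceSize align = (type == VK_INDEX_TYPE_UINT16) ? 2 : 4;
    return (offset + align - 1) & ~(align - 1); // round up to the required boundary
}
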
6696void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6697    uint32_t end = firstBinding + bindingCount;
6698    if (pCB->currentDrawData.buffers.size() < end) {
6699        pCB->currentDrawData.buffers.resize(end);
6700    }
6701    for (uint32_t i = 0; i < bindingCount; ++i) {
6702        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6703    }
6704}
6705
6706static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6707
6708VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
6709                                                uint32_t bindingCount, const VkBuffer *pBuffers,
6710                                                const VkDeviceSize *pOffsets) {
6711    bool skipCall = false;
6712    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6713    // TODO : Somewhere need to verify that VBs have correct usage state flagged
6714    std::unique_lock<std::mutex> lock(global_lock);
6715    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6716    if (cb_data != dev_data->commandBufferMap.end()) {
6717        for (uint32_t i = 0; i < bindingCount; ++i) {
6718            VkDeviceMemory mem;
6719            skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6720
6721            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
6722            cb_data->second->validate_functions.push_back(function);
6723        }
6724        skipCall |= addCmd(dev_data, cb_data->second, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
6725        updateResourceTracking(cb_data->second, firstBinding, bindingCount, pBuffers);
6726    } else {
6727        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
6728    }
6729    lock.unlock();
6730    if (!skipCall)
6731        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6732}
6733
6734/* expects global_lock to be held by caller */
6735static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6736    bool skip_call = false;
6737
6738    for (auto imageView : pCB->updateImages) {
6739        auto iv_data = dev_data->imageViewMap.find(imageView);
6740        if (iv_data == dev_data->imageViewMap.end())
6741            continue;
6742        VkImage image = iv_data->second.image;
6743        VkDeviceMemory mem;
6744        skip_call |=
6745            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
6746        std::function<bool()> function = [=]() {
6747            set_memory_valid(dev_data, mem, true, image);
6748            return false;
6749        };
6750        pCB->validate_functions.push_back(function);
6751    }
6752    for (auto buffer : pCB->updateBuffers) {
6753        VkDeviceMemory mem;
6754        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
6755                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6756        std::function<bool()> function = [=]() {
6757            set_memory_valid(dev_data, mem, true);
6758            return false;
6759        };
6760        pCB->validate_functions.push_back(function);
6761    }
6762    return skip_call;
6763}
6764
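// Illustrative sketch (editor addition): the validate_functions vectors built above
// defer per-resource checks from record time to submit time by capturing state in
// closures; at submit, they are drained like this:
static bool ExampleRunDeferredValidation(std::vector<std::function<bool()>> &deferred) {
    bool skip = false;
    for (auto &fn : deferred)
        skip |= fn(); // each closure re-checks the memory state it captured
    return skip;
}
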
6765VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6766                                   uint32_t firstVertex, uint32_t firstInstance) {
6767    bool skipCall = false;
6768    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6769    std::unique_lock<std::mutex> lock(global_lock);
6770    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6771    if (pCB) {
6772        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
6773        pCB->drawCount[DRAW]++;
6774        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6775        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6776        // TODO : Need to pass commandBuffer as srcObj here
6777        skipCall |=
6778            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6779                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
6780        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6781        if (!skipCall) {
6782            updateResourceTrackingOnDraw(pCB);
6783        }
6784        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
6785    }
6786    lock.unlock();
6787    if (!skipCall)
6788        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6789}
6790
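// Illustrative sketch (editor addition): draws must be recorded inside an active
// render pass (the outsideRenderPass check above). The begin-info contents and the
// pipeline handle are assumptions.
static void ExampleDrawInsideRenderPass(VkCommandBuffer cb, const VkRenderPassBeginInfo *rp_begin, VkPipeline gfx) {
    vkCmdBeginRenderPass(cb, rp_begin, VK_SUBPASS_CONTENTS_INLINE);
    vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, gfx);
    vkCmdDraw(cb, 3 /*vertexCount*/, 1 /*instanceCount*/, 0, 0);
    vkCmdEndRenderPass(cb);
}
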
6791VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
6792                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
6793                                          uint32_t firstInstance) {
6794    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6795    bool skipCall = false;
6796    std::unique_lock<std::mutex> lock(global_lock);
6797    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6798    if (pCB) {
6799        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
6800        pCB->drawCount[DRAW_INDEXED]++;
6801        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6802        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6803        // TODO : Need to pass commandBuffer as srcObj here
6804        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6805                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6806                            "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
6807        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6808        if (!skipCall) {
6809            updateResourceTrackingOnDraw(pCB);
6810        }
6811        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
6812    }
6813    lock.unlock();
6814    if (!skipCall)
6815        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
6816                                                        firstInstance);
6817}
6818
6819VKAPI_ATTR void VKAPI_CALL
6820CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6821    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6822    bool skipCall = false;
6823    std::unique_lock<std::mutex> lock(global_lock);
6824    VkDeviceMemory mem;
6825    // MTMTODO : merge with code below
6826    skipCall =
6827        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6828    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
6829    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6830    if (pCB) {
6831        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
6832        pCB->drawCount[DRAW_INDIRECT]++;
6833        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6834        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6835        // TODO : Need to pass commandBuffer as srcObj here
6836        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6837                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6838                            "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
6839        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6840        if (!skipCall) {
6841            updateResourceTrackingOnDraw(pCB);
6842        }
6843        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
6844    }
6845    lock.unlock();
6846    if (!skipCall)
6847        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6848}
6849
6850VKAPI_ATTR void VKAPI_CALL
6851CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6852    bool skipCall = false;
6853    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6854    std::unique_lock<std::mutex> lock(global_lock);
6855    VkDeviceMemory mem;
6856    // MTMTODO : merge with code below
6857    skipCall =
6858        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6859    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
6860    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6861    if (pCB) {
6862        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
6863        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
6864        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6865        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6866        // TODO : Need to pass commandBuffer as srcObj here
6867        skipCall |=
6868            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6869                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
6870                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
6871        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6872        if (!skipCall) {
6873            updateResourceTrackingOnDraw(pCB);
6874        }
6875        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
6876    }
6877    lock.unlock();
6878    if (!skipCall)
6879        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6880}
6881
6882VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6883    bool skipCall = false;
6884    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6885    std::unique_lock<std::mutex> lock(global_lock);
6886    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6887    if (pCB) {
6888        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6889        // TODO : Call below is temporary until call above can be re-enabled
6890        update_shader_storage_images_and_buffers(dev_data, pCB);
6891        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6892        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
6893        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
6894    }
6895    lock.unlock();
6896    if (!skipCall)
6897        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
6898}
6899
6900VKAPI_ATTR void VKAPI_CALL
6901CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
6902    bool skipCall = false;
6903    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6904    std::unique_lock<std::mutex> lock(global_lock);
6905    VkDeviceMemory mem;
6906    skipCall =
6907        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
6908    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
6909    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6910    if (pCB) {
6911        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6912        // TODO : Call below is temporary until call above can be re-enabled
6913        update_shader_storage_images_and_buffers(dev_data, pCB);
6914        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6915        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
6916        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
6917    }
6918    lock.unlock();
6919    if (!skipCall)
6920        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
6921}
6922
6923VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
6924                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
6925    bool skipCall = false;
6926    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6927    std::unique_lock<std::mutex> lock(global_lock);
6928    VkDeviceMemory src_mem, dst_mem;
6929    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
6930    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBuffer");
6931    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
6932
6933    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBuffer");
6934    // Validate that SRC & DST buffers have correct usage flags set
6935    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
6936                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
6937    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
6938                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
6939    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
6940    if (cb_data != dev_data->commandBufferMap.end()) {
6941        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); };
6942        cb_data->second->validate_functions.push_back(function);
6943        function = [=]() {
6944            set_memory_valid(dev_data, dst_mem, true);
6945            return false;
6946        };
6947        cb_data->second->validate_functions.push_back(function);
6948
6949        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
6950        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBuffer");
6951    }
6952    lock.unlock();
6953    if (!skipCall)
6954        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
6955}
6956
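// Illustrative sketch (editor addition): the usage-flag checks above require copy
// sources and destinations to be created with the matching TRANSFER bits. Size and
// sharing mode below are assumptions.
static VkResult ExampleCreateCopyDstBuffer(VkDevice device, VkDeviceSize size, VkBuffer *buffer) {
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.size = size;
    buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; // OR in further usages as needed
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    return vkCreateBuffer(device, &buf_info, nullptr, buffer);
}
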
6957static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
6958                                    VkImageLayout srcImageLayout) {
6959    bool skip_call = false;
6960
6961    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
6962    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
6963    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
6964        uint32_t layer = i + subLayers.baseArrayLayer;
6965        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
6966        IMAGE_CMD_BUF_LAYOUT_NODE node;
6967        if (!FindLayout(pCB, srcImage, sub, node)) {
6968            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
6969            continue;
6970        }
6971        if (node.layout != srcImageLayout) {
6972            // TODO: Improve log message in the next pass
6973            skip_call |=
6974                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6975                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout (%s) "
6976                                                                        "does not match its current layout (%s).",
6977                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
6978        }
6979    }
6980    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
6981        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
6982            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
6983            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6984                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
6985                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
6986        } else {
6987            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6988                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
6989                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
6990                                 string_VkImageLayout(srcImageLayout));
6991        }
6992    }
6993    return skip_call;
6994}
6995
6996static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
6997                                  VkImageLayout destImageLayout) {
6998    bool skip_call = false;
6999
7000    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7001    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7002    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7003        uint32_t layer = i + subLayers.baseArrayLayer;
7004        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7005        IMAGE_CMD_BUF_LAYOUT_NODE node;
7006        if (!FindLayout(pCB, destImage, sub, node)) {
7007            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7008            continue;
7009        }
7010        if (node.layout != destImageLayout) {
7011            skip_call |=
7012                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7013                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose destination layout (%s) "
7014                                                                        "does not match its current layout (%s).",
7015                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7016        }
7017    }
7018    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7019        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7020            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7021            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7022                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7023                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7024        } else {
7025            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7026                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7027                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7028                                 string_VkImageLayout(destImageLayout));
7029        }
7030    }
7031    return skip_call;
7032}
7033
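// Illustrative sketch (editor addition): the barrier an application would record to
// satisfy the layout checks above before using an image as a copy destination. The
// old layout, aspect, and stage masks are assumptions.
static void ExampleTransitionToTransferDst(VkCommandBuffer cb, VkImage image) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; // assumed prior layout
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
    vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
                         0, nullptr, 0, nullptr, 1, &barrier);
}
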
7034VKAPI_ATTR void VKAPI_CALL
7035CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7036             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7037    bool skipCall = false;
7038    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7039    std::unique_lock<std::mutex> lock(global_lock);
7040    VkDeviceMemory src_mem, dst_mem;
7041    // Validate that src & dst images have correct usage flags set
7042    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7043    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImage");
7044
7045    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7046    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImage");
7047    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7048                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7049    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7050                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7051    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7052    if (cb_data != dev_data->commandBufferMap.end()) {
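        // These lambdas are not run here; they are queued on the CB and executed later (when the
        // command buffer is submitted) to check src memory contents and mark dst memory valid.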
7053        std::function<bool()> function = [=]() {
7054            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImage()", srcImage);
7055        };
7056        cb_data->second->validate_functions.push_back(function);
7057        function = [=]() {
7058            set_memory_valid(dev_data, dst_mem, true, dstImage);
7059            return false;
7060        };
7061        cb_data->second->validate_functions.push_back(function);
7062
7063        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGE, "vkCmdCopyImage()");
7064        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImage");
7065        for (uint32_t i = 0; i < regionCount; ++i) {
7066            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7067            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7068        }
7069    }
7070    lock.unlock();
7071    if (!skipCall)
7072        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7073                                                      regionCount, pRegions);
7074}
7075
7076VKAPI_ATTR void VKAPI_CALL
7077CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7078             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7079    bool skipCall = false;
7080    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7081    std::unique_lock<std::mutex> lock(global_lock);
7082    VkDeviceMemory src_mem, dst_mem;
7083    // Validate that src & dst images have correct usage flags set
7084    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7085    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdBlitImage");
7086
7087    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7088    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdBlitImage");
7089    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7090                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7091    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7092                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7093
7094    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7095    if (cb_data != dev_data->commandBufferMap.end()) {
7096        std::function<bool()> function = [=]() {
7097            return validate_memory_is_valid(dev_data, src_mem, "vkCmdBlitImage()", srcImage);
7098        };
7099        cb_data->second->validate_functions.push_back(function);
7100        function = [=]() {
7101            set_memory_valid(dev_data, dst_mem, true, dstImage);
7102            return false;
7103        };
7104        cb_data->second->validate_functions.push_back(function);
7105
7106        skipCall |= addCmd(dev_data, cb_data->second, CMD_BLITIMAGE, "vkCmdBlitImage()");
7107        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdBlitImage");
7108    }
7109    lock.unlock();
7110    if (!skipCall)
7111        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7112                                                      regionCount, pRegions, filter);
7113}
7114
7115VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7116                                                VkImage dstImage, VkImageLayout dstImageLayout,
7117                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7118    bool skipCall = false;
7119    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7120    std::unique_lock<std::mutex> lock(global_lock);
7121    VkDeviceMemory dst_mem, src_mem;
7122    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7123    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBufferToImage");
7124
7125    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &src_mem);
7126    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBufferToImage");
7127    // Validate that src buff & dst image have correct usage flags set
7128    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()",
7129                                            "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7130    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()",
7131                                           "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7132    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7133    if (cb_data != dev_data->commandBufferMap.end()) {
7134        std::function<bool()> function = [=]() {
7135            set_memory_valid(dev_data, dst_mem, true, dstImage);
7136            return false;
7137        };
7138        cb_data->second->validate_functions.push_back(function);
7139        function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBufferToImage()"); };
7140        cb_data->second->validate_functions.push_back(function);
7141
7142        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7143        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyBufferToImage");
7144        for (uint32_t i = 0; i < regionCount; ++i) {
7145            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7146        }
7147    }
7148    lock.unlock();
7149    if (!skipCall)
7150        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7151                                                              pRegions);
7152}
7153
7154VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7155                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7156                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7157    bool skipCall = false;
7158    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7159    std::unique_lock<std::mutex> lock(global_lock);
7160    VkDeviceMemory src_mem, dst_mem;
7161    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7162    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImageToBuffer");
7163
7164    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &dst_mem);
7165    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImageToBuffer");
7166    // Validate that dst buff & src image have correct usage flags set
7167    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()",
7168                                           "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7169    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()",
7170                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7171
7172    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7173    if (cb_data != dev_data->commandBufferMap.end()) {
7174        std::function<bool()> function = [=]() {
7175            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImageToBuffer()", srcImage);
7176        };
7177        cb_data->second->validate_functions.push_back(function);
7178        function = [=]() {
7179            set_memory_valid(dev_data, dst_mem, true);
7180            return false;
7181        };
7182        cb_data->second->validate_functions.push_back(function);
7183
7184        skipCall |= addCmd(dev_data, cb_data->second, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7185        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdCopyImageToBuffer");
7186        for (uint32_t i = 0; i < regionCount; ++i) {
7187            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7188        }
7189    }
7190    lock.unlock();
7191    if (!skipCall)
7192        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7193                                                              pRegions);
7194}
7195
7196VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7197                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7198    bool skipCall = false;
7199    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7200    std::unique_lock<std::mutex> lock(global_lock);
7201    VkDeviceMemory mem;
7202    skipCall =
7203        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7204    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7205    // Validate that dst buff has correct usage flags set
7206    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()",
7207                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7208
7209    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7210    if (cb_data != dev_data->commandBufferMap.end()) {
7211        std::function<bool()> function = [=]() {
7212            set_memory_valid(dev_data, mem, true);
7213            return false;
7214        };
7215        cb_data->second->validate_functions.push_back(function);
7216
7217        skipCall |= addCmd(dev_data, cb_data->second, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdUpdateBuffer");
7219    }
7220    lock.unlock();
7221    if (!skipCall)
7222        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7223}
7224
7225VKAPI_ATTR void VKAPI_CALL
7226CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7227    bool skipCall = false;
7228    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7229    std::unique_lock<std::mutex> lock(global_lock);
7230    VkDeviceMemory mem;
7231    skipCall =
7232        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7233    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7234    // Validate that dst buff has correct usage flags set
7235    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
7236                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7237
7238    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7239    if (cb_data != dev_data->commandBufferMap.end()) {
7240        std::function<bool()> function = [=]() {
7241            set_memory_valid(dev_data, mem, true);
7242            return false;
7243        };
7244        cb_data->second->validate_functions.push_back(function);
7245
7246        skipCall |= addCmd(dev_data, cb_data->second, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdFillBuffer");
7248    }
7249    lock.unlock();
7250    if (!skipCall)
7251        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7252}
7253
7254VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7255                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7256                                               const VkClearRect *pRects) {
7257    bool skipCall = false;
7258    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7259    std::unique_lock<std::mutex> lock(global_lock);
7260    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7261    if (pCB) {
7262        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7263        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7266            // TODO : commandBuffer should be srcObj
7267            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
7268            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
7269            // call CmdClearAttachments
7270            // Otherwise this seems more like a performance warning.
7271            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7273                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7274                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7275                                (uint64_t)(commandBuffer));
7276        }
7277        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7278    }
7279
7280    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
7282        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
7283        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7284
7285        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7286            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7287            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7288                bool found = false;
7289                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7290                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7291                        found = true;
7292                        break;
7293                    }
7294                }
7295                if (!found) {
7296                    skipCall |= log_msg(
7297                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7298                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7299                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7300                        attachment->colorAttachment, pCB->activeSubpass);
7301                }
7302            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                // Either condition means no depth/stencil attachment will be used in the active subpass
                if (!pSD->pDepthStencilAttachment ||
                    (pSD->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
7306
7307                    skipCall |= log_msg(
7308                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7309                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7310                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7311                        "in active subpass %d",
7312                        attachment->colorAttachment,
7313                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7314                        pCB->activeSubpass);
7315                }
7316            }
7317        }
7318    }
7319    lock.unlock();
7320    if (!skipCall)
7321        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7322}
7323
7324VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7325                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
7326                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7327    bool skipCall = false;
7328    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7329    std::unique_lock<std::mutex> lock(global_lock);
7330    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7331    VkDeviceMemory mem;
7332    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7333    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7334    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7335    if (cb_data != dev_data->commandBufferMap.end()) {
7336        std::function<bool()> function = [=]() {
7337            set_memory_valid(dev_data, mem, true, image);
7338            return false;
7339        };
7340        cb_data->second->validate_functions.push_back(function);
7341
7342        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7343        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearColorImage");
7344    }
7345    lock.unlock();
7346    if (!skipCall)
7347        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7348}
7349
7350VKAPI_ATTR void VKAPI_CALL
7351CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7352                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7353                          const VkImageSubresourceRange *pRanges) {
7354    bool skipCall = false;
7355    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7356    std::unique_lock<std::mutex> lock(global_lock);
7357    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7358    VkDeviceMemory mem;
7359    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7360    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7361    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7362    if (cb_data != dev_data->commandBufferMap.end()) {
7363        std::function<bool()> function = [=]() {
7364            set_memory_valid(dev_data, mem, true, image);
7365            return false;
7366        };
7367        cb_data->second->validate_functions.push_back(function);
7368
7369        skipCall |= addCmd(dev_data, cb_data->second, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7370        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdClearDepthStencilImage");
7371    }
7372    lock.unlock();
7373    if (!skipCall)
7374        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7375                                                                   pRanges);
7376}
7377
7378VKAPI_ATTR void VKAPI_CALL
7379CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7380                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7381    bool skipCall = false;
7382    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7383    std::unique_lock<std::mutex> lock(global_lock);
7384    VkDeviceMemory src_mem, dst_mem;
7385    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &src_mem);
7386    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdResolveImage");
7387
7388    skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &dst_mem);
7389    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdResolveImage");
7390    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7391    if (cb_data != dev_data->commandBufferMap.end()) {
7392        std::function<bool()> function = [=]() {
7393            return validate_memory_is_valid(dev_data, src_mem, "vkCmdResolveImage()", srcImage);
7394        };
7395        cb_data->second->validate_functions.push_back(function);
7396        function = [=]() {
7397            set_memory_valid(dev_data, dst_mem, true, dstImage);
7398            return false;
7399        };
7400        cb_data->second->validate_functions.push_back(function);
7401
7402        skipCall |= addCmd(dev_data, cb_data->second, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7403        skipCall |= insideRenderPass(dev_data, cb_data->second, "vkCmdResolveImage");
7404    }
7405    lock.unlock();
7406    if (!skipCall)
7407        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7408                                                         regionCount, pRegions);
7409}
7410
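// Bound into pCB->eventUpdates at record time (see CmdSetEvent/CmdResetEvent below) and
// invoked later with the queue that executes the command buffer, so per-queue event state
// reflects the stageMask last used to set the event.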
7411bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7412    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7413    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7414    if (pCB) {
7415        pCB->eventToStageMap[event] = stageMask;
7416    }
7417    auto queue_data = dev_data->queueMap.find(queue);
7418    if (queue_data != dev_data->queueMap.end()) {
7419        queue_data->second.eventToStageMap[event] = stageMask;
7420    }
7421    return false;
7422}
7423
7424VKAPI_ATTR void VKAPI_CALL
7425CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7426    bool skipCall = false;
7427    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7428    std::unique_lock<std::mutex> lock(global_lock);
7429    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7430    if (pCB) {
7431        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7432        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7433        pCB->events.push_back(event);
7434        std::function<bool(VkQueue)> eventUpdate =
7435            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7436        pCB->eventUpdates.push_back(eventUpdate);
7437    }
7438    lock.unlock();
7439    if (!skipCall)
7440        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7441}
7442
7443VKAPI_ATTR void VKAPI_CALL
7444CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7445    bool skipCall = false;
7446    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7447    std::unique_lock<std::mutex> lock(global_lock);
7448    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7449    if (pCB) {
7450        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7451        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
7452        pCB->events.push_back(event);
7453        std::function<bool(VkQueue)> eventUpdate =
7454            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7455        pCB->eventUpdates.push_back(eventUpdate);
7456    }
7457    lock.unlock();
7458    if (!skipCall)
7459        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
7460}
7461
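// Record the layout transitions requested by the given image memory barriers into the
// command buffer's per-subresource layout map: each (aspect, mip level, array layer)
// tuple is checked against its last known layout and then updated to newLayout.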
7462static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7463                                   const VkImageMemoryBarrier *pImgMemBarriers) {
7464    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7465    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7466    bool skip = false;
7467    uint32_t levelCount = 0;
7468    uint32_t layerCount = 0;
7469
7470    for (uint32_t i = 0; i < memBarrierCount; ++i) {
7471        auto mem_barrier = &pImgMemBarriers[i];
7472        if (!mem_barrier)
7473            continue;
7474        // TODO: Do not iterate over every possibility - consolidate where
7475        // possible
7476        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
7477
7478        for (uint32_t j = 0; j < levelCount; j++) {
7479            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
7480            for (uint32_t k = 0; k < layerCount; k++) {
7481                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
7482                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
7483                IMAGE_CMD_BUF_LAYOUT_NODE node;
7484                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
7485                    SetLayout(pCB, mem_barrier->image, sub,
7486                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
7487                    continue;
7488                }
7489                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7490                    // TODO: Set memory invalid which is in mem_tracker currently
7491                } else if (node.layout != mem_barrier->oldLayout) {
7492                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7493                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
7494                                                                                    "when current layout is %s.",
7495                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
7496                }
7497                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
7498            }
7499        }
7500    }
7501    return skip;
7502}
7503
7504// Print readable FlagBits in FlagMask
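// e.g. (illustrative) string_VkAccessFlags(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT)
// returns "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]", scanning bits low to high.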
7505static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
7506    std::string result;
7507    std::string separator;
7508
7509    if (accessMask == 0) {
7510        result = "[None]";
7511    } else {
7512        result = "[";
7513        for (auto i = 0; i < 32; i++) {
7514            if (accessMask & (1 << i)) {
7515                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
7516                separator = " | ";
7517            }
7518        }
7519        result = result + "]";
7520    }
7521    return result;
7522}
7523
7524// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
7525// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
7526// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
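// e.g. (illustrative) for a transition to TRANSFER_DST_OPTIMAL the caller passes
// required_bit = VK_ACCESS_TRANSFER_WRITE_BIT and optional_bits = 0, so an accessMask of
// exactly VK_ACCESS_TRANSFER_WRITE_BIT is silent while any other mask draws a warning.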
7527static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7528                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
7529                             const char *type) {
7530    bool skip_call = false;
7531
7532    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
7533        if (accessMask & ~(required_bit | optional_bits)) {
7534            // TODO: Verify against Valid Use
7535            skip_call |=
7536                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7537                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
7538                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7539        }
7540    } else {
7541        if (!required_bit) {
7542            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7543                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
7544                                                                  "%s when layout is %s, unless the app has previously added a "
7545                                                                  "barrier for this transition.",
7546                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
7547                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
7548        } else {
7549            std::string opt_bits;
7550            if (optional_bits != 0) {
7551                std::stringstream ss;
7552                ss << optional_bits;
7553                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
7554            }
7555            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7556                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
7557                                                                  "layout is %s, unless the app has previously added a barrier for "
7558                                                                  "this transition.",
7559                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
7560                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
7561        }
7562    }
7563    return skip_call;
7564}
7565
7566static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7567                                        const VkImageLayout &layout, const char *type) {
7568    bool skip_call = false;
7569    switch (layout) {
7570    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
7571        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
7572                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
7573        break;
7574    }
7575    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
7576        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
7577                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
7578        break;
7579    }
7580    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
7581        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
7582        break;
7583    }
7584    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
7585        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
7586        break;
7587    }
7588    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
7589        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7590                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7591        break;
7592    }
7593    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
7594        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7595                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7596        break;
7597    }
7598    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
7599        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
7600        break;
7601    }
7602    case VK_IMAGE_LAYOUT_UNDEFINED: {
7603        if (accessMask != 0) {
7604            // TODO: Verify against Valid Use section spec
7605            skip_call |=
7606                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7607                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
7608                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7609        }
7610        break;
7611    }
7612    case VK_IMAGE_LAYOUT_GENERAL:
7613    default: { break; }
7614    }
7615    return skip_call;
7616}
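
// e.g. (illustrative) a barrier whose newLayout is VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL is
// expected to carry VK_ACCESS_SHADER_READ_BIT and/or VK_ACCESS_INPUT_ATTACHMENT_READ_BIT in its
// dstAccessMask and no other bits; anything else draws a DRAWSTATE_INVALID_BARRIER warning.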
7617
7618static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7619                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7620                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7621                             const VkImageMemoryBarrier *pImageMemBarriers) {
7622    bool skip_call = false;
7623    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7624    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7625    if (pCB->activeRenderPass && memBarrierCount) {
7626        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7627            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7628                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
7629                                                                  "with no self dependency specified.",
7630                                 funcName, pCB->activeSubpass);
7631        }
7632    }
7633    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7634        auto mem_barrier = &pImageMemBarriers[i];
7635        auto image_data = dev_data->imageMap.find(mem_barrier->image);
7636        if (image_data != dev_data->imageMap.end()) {
7637            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7638            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7639            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7640                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7641                // be VK_QUEUE_FAMILY_IGNORED
7642                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7643                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7644                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7645                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7648                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7649                }
7650            } else {
7651                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7652                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7653                // or both be a valid queue family
7654                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7655                    (src_q_f_index != dst_q_f_index)) {
7656                    skip_call |=
7657                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7658                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
7659                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7660                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7661                                                                     "must be.",
7662                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7663                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7664                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7665                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7666                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7667                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7668                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
7669                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7670                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
                                         " queueFamilies created for this device.",
7672                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
7673                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
7674                }
7675            }
7676        }
7677
7678        if (mem_barrier) {
7679            skip_call |=
7680                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7681            skip_call |=
7682                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
7683            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.", funcName);
7688            }
7689            auto image_data = dev_data->imageMap.find(mem_barrier->image);
7690            VkFormat format = VK_FORMAT_UNDEFINED;
7691            uint32_t arrayLayers = 0, mipLevels = 0;
7692            bool imageFound = false;
7693            if (image_data != dev_data->imageMap.end()) {
7694                format = image_data->second.createInfo.format;
7695                arrayLayers = image_data->second.createInfo.arrayLayers;
7696                mipLevels = image_data->second.createInfo.mipLevels;
7697                imageFound = true;
7698            } else if (dev_data->device_extensions.wsi_enabled) {
7699                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
7700                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
7701                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
7702                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
7703                        format = swapchain_data->second->createInfo.imageFormat;
7704                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
7705                        mipLevels = 1;
7706                        imageFound = true;
7707                    }
7708                }
7709            }
7710            if (imageFound) {
7711                if (vk_format_is_depth_and_stencil(format) &&
7712                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
7713                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Image is a depth and stencil format and thus must "
                                         "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
                                         "VK_IMAGE_ASPECT_STENCIL_BIT set.",
                                         funcName);
7719                }
7720                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
7721                                     ? 1
7722                                     : mem_barrier->subresourceRange.layerCount;
7723                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the "
                                         "baseArrayLayer (%d) and layerCount (%d) be less "
                                         "than or equal to the total number of layers (%d).",
                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
7730                }
7731                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
7732                                     ? 1
7733                                     : mem_barrier->subresourceRange.levelCount;
7734                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                         "%s: Subresource must have the sum of the baseMipLevel "
                                         "(%d) and levelCount (%d) be less than or equal to "
                                         "the total number of levels (%d).",
                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
                                         mem_barrier->subresourceRange.levelCount, mipLevels);
7741                }
7742            }
7743        }
7744    }
7745    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7746        auto mem_barrier = &pBufferMemBarriers[i];
7747        if (pCB->activeRenderPass) {
7748            skip_call |=
7749                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7750                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7751        }
7752        if (!mem_barrier)
7753            continue;
7754
7755        // Validate buffer barrier queue family indices
7756        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7757             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7758            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7759             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7760            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7761                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7762                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
7763                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7764                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7765                                 dev_data->phys_dev_properties.queue_family_properties.size());
7766        }
7767
7768        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
7769        if (buffer_data != dev_data->bufferMap.end()) {
7770            VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
7771                                           ? buffer_data->second.createInfo.size
7772                                           : 0;
7773            if (mem_barrier->offset >= buffer_size) {
7774                skip_call |= log_msg(
7775                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7776                    DRAWSTATE_INVALID_BARRIER, "DS",
7777                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7778                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7779                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
7780            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7781                skip_call |= log_msg(
7782                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7783                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7784                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
7785                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7786                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
7787                    reinterpret_cast<const uint64_t &>(buffer_size));
7788            }
7789        }
7790    }
7791    return skip_call;
7792}
7793
7794bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
7795    bool skip_call = false;
7796    VkPipelineStageFlags stageMask = 0;
7797    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
7798    for (uint32_t i = 0; i < eventCount; ++i) {
7799        auto event = pCB->events[firstEventIndex + i];
7800        auto queue_data = dev_data->queueMap.find(queue);
7801        if (queue_data == dev_data->queueMap.end())
7802            return false;
7803        auto event_data = queue_data->second.eventToStageMap.find(event);
7804        if (event_data != queue_data->second.eventToStageMap.end()) {
7805            stageMask |= event_data->second;
7806        } else {
7807            auto global_event_data = dev_data->eventMap.find(event);
7808            if (global_event_data == dev_data->eventMap.end()) {
7809                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7810                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
7811                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
7812                                     reinterpret_cast<const uint64_t &>(event));
7813            } else {
7814                stageMask |= global_event_data->second.stageMask;
7815            }
7816        }
7817    }
7818    if (sourceStageMask != stageMask) {
7819        skip_call |=
7820            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7821                    DRAWSTATE_INVALID_EVENT, "DS",
                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of "
                    "the stageMask parameters used in calls to vkCmdSetEvent, or VK_PIPELINE_STAGE_HOST_BIT if the event was "
                    "set with vkSetEvent.",
                    sourceStageMask);
7825    }
7826    return skip_call;
7827}
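
// e.g. (illustrative) if e1 was set with vkCmdSetEvent(cb, e1, VK_PIPELINE_STAGE_TRANSFER_BIT) and
// e2 with vkCmdSetEvent(cb, e2, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT), a vkCmdWaitEvents waiting on
// {e1, e2} must use srcStageMask == (VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT).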
7828
7829VKAPI_ATTR void VKAPI_CALL
7830CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
7831              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7832              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7833              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7834    bool skipCall = false;
7835    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7836    std::unique_lock<std::mutex> lock(global_lock);
7837    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7838    if (pCB) {
7839        auto firstEventIndex = pCB->events.size();
7840        for (uint32_t i = 0; i < eventCount; ++i) {
7841            pCB->waitedEvents.push_back(pEvents[i]);
7842            pCB->events.push_back(pEvents[i]);
7843        }
7844        std::function<bool(VkQueue)> eventUpdate =
7845            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
7846        pCB->eventUpdates.push_back(eventUpdate);
7847        if (pCB->state == CB_RECORDING) {
7848            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7849        } else {
7850            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
7851        }
7852        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7853        skipCall |=
7854            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7855                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7856    }
7857    lock.unlock();
7858    if (!skipCall)
7859        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7860                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7861                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7862}
7863
7864VKAPI_ATTR void VKAPI_CALL
7865CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
7866                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7867                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7868                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7869    bool skipCall = false;
7870    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7871    std::unique_lock<std::mutex> lock(global_lock);
7872    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7873    if (pCB) {
7874        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
7875        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7876        skipCall |=
7877            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7878                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7879    }
7880    lock.unlock();
7881    if (!skipCall)
7882        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7883                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7884                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7885}
7886
7887VKAPI_ATTR void VKAPI_CALL
7888CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
7889    bool skipCall = false;
7890    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7891    std::unique_lock<std::mutex> lock(global_lock);
7892    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7893    if (pCB) {
7894        QueryObject query = {queryPool, slot};
7895        pCB->activeQueries.insert(query);
7896        if (!pCB->startedQueries.count(query)) {
7897            pCB->startedQueries.insert(query);
7898        }
7899        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
7900    }
7901    lock.unlock();
7902    if (!skipCall)
7903        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
7904}
7905
7906VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
7907    bool skipCall = false;
7908    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7909    std::unique_lock<std::mutex> lock(global_lock);
7910    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7911    if (pCB) {
7912        QueryObject query = {queryPool, slot};
7913        if (!pCB->activeQueries.count(query)) {
7914            skipCall |=
7915                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7916                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
7917                        (uint64_t)(queryPool), slot);
7918        } else {
7919            pCB->activeQueries.erase(query);
7920        }
7921        pCB->queryToStateMap[query] = 1;
7922        if (pCB->state == CB_RECORDING) {
            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
7924        } else {
7925            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
7926        }
7927    }
7928    lock.unlock();
7929    if (!skipCall)
7930        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
7931}
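
// The Begin/End bookkeeping above is a small state machine keyed on (queryPool, slot)
// pairs. The standalone sketch below distills the same idea; QueryKey and QueryTracker
// are illustrative names only, not types used elsewhere in this layer.
struct QueryKey {
    uint64_t pool;
    uint32_t slot;
    bool operator<(const QueryKey &rhs) const { return std::tie(pool, slot) < std::tie(rhs.pool, rhs.slot); }
};
struct QueryTracker {
    std::set<QueryKey> active;  // Begin seen, matching End not yet seen
    std::set<QueryKey> started; // Begin seen at least once in this command buffer
    void begin(const QueryKey &q) {
        active.insert(q);
        started.insert(q);
    }
    // Returns false when End arrives without a matching Begin, which mirrors the
    // DRAWSTATE_INVALID_QUERY report in CmdEndQuery() above.
    bool end(const QueryKey &q) { return active.erase(q) != 0; }
};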
7932
7933VKAPI_ATTR void VKAPI_CALL
7934CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
7935    bool skipCall = false;
7936    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7937    std::unique_lock<std::mutex> lock(global_lock);
7938    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7939    if (pCB) {
7940        for (uint32_t i = 0; i < queryCount; i++) {
7941            QueryObject query = {queryPool, firstQuery + i};
7942            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
7943            pCB->queryToStateMap[query] = 0;
7944        }
7945        if (pCB->state == CB_RECORDING) {
7946            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
7947        } else {
7948            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
7949        }
7950        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
7951    }
7952    lock.unlock();
7953    if (!skipCall)
7954        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
7955}
7956
7957VKAPI_ATTR void VKAPI_CALL
7958CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
7959                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
7960    bool skipCall = false;
7961    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7962    std::unique_lock<std::mutex> lock(global_lock);
7963    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7964#if MTMERGESOURCE
7965    VkDeviceMemory mem;
7966    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7967    skipCall |=
7968        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7969    if (cb_data != dev_data->commandBufferMap.end()) {
7970        std::function<bool()> function = [=]() {
7971            set_memory_valid(dev_data, mem, true);
7972            return false;
7973        };
7974        cb_data->second->validate_functions.push_back(function);
7975    }
7976    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
7977    // Validate that DST buffer has correct usage flags set
7978    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7979                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7980#endif
7981    if (pCB) {
7982        for (uint32_t i = 0; i < queryCount; i++) {
7983            QueryObject query = {queryPool, firstQuery + i};
7984            if (!pCB->queryToStateMap[query]) {
7985                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7986                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
7987                                    "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
7988                                    (uint64_t)(queryPool), firstQuery + i);
7989            }
7990        }
7991        if (pCB->state == CB_RECORDING) {
7992            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
7993        } else {
7994            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
7995        }
7996        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
7997    }
7998    lock.unlock();
7999    if (!skipCall)
8000        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8001                                                                 dstOffset, stride, flags);
8002}
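
// The validate_functions pattern used above defers checks that can only be resolved at
// queue-submit time: a closure capturing the destination memory is queued during
// recording and run on submit. A minimal sketch of the pattern, assuming nothing beyond
// the standard library (DeferredChecks is an illustrative name):
struct DeferredChecks {
    // Each deferred check returns true if the enclosing call should be skipped.
    std::vector<std::function<bool()>> checks;
    void queue(std::function<bool()> f) { checks.push_back(f); }
    bool run_all() { // invoked at "submit" time
        bool skip = false;
        for (auto &check : checks)
            skip |= check();
        return skip;
    }
};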
8003
8004VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8005                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8006                                            const void *pValues) {
8007    bool skipCall = false;
8008    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8009    std::unique_lock<std::mutex> lock(global_lock);
8010    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8011    if (pCB) {
8012        if (pCB->state == CB_RECORDING) {
8013            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8014        } else {
8015            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8016        }
8017    }
8018    skipCall |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8019    if (0 == stageFlags) {
8020        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8021                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
8022    }
8023
8024    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
8025    auto pipeline_layout_it = dev_data->pipelineLayoutMap.find(layout);
8026    if (pipeline_layout_it == dev_data->pipelineLayoutMap.end()) {
8027        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8028                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
8029                            (uint64_t)layout);
8030    } else {
8031        // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
8032        // contained in the pipeline ranges.
8033        // Build a {start, end} span list for ranges with matching stage flags.
8034        const auto &ranges = pipeline_layout_it->second.pushConstantRanges;
8035        struct span {
8036            uint32_t start;
8037            uint32_t end;
8038        };
8039        std::vector<span> spans;
8040        spans.reserve(ranges.size());
8041        for (const auto &iter : ranges) {
8042            if (iter.stageFlags == stageFlags) {
8043                spans.push_back({iter.offset, iter.offset + iter.size});
8044            }
8045        }
8046        if (spans.size() == 0) {
8047            // There were no ranges that matched the stageFlags.
8048            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8049                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8050                                "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
8051                                "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
8052                                (uint32_t)stageFlags, (uint64_t)layout);
8053        } else {
8054            // Sort span list by start value.
8055            struct comparer {
8056                bool operator()(struct span i, struct span j) { return i.start < j.start; }
8057            } my_comparer;
8058            std::sort(spans.begin(), spans.end(), my_comparer);
8059
8060            // Examine two spans at a time.
8061            std::vector<span>::iterator current = spans.begin();
8062            std::vector<span>::iterator next = current + 1;
8063            while (next != spans.end()) {
8064                if (current->end < next->start) {
8065                    // There is a gap; cannot coalesce. Move to the next two spans.
8066                    ++current;
8067                    ++next;
8068                } else {
8069                    // Coalesce the two spans.  The start of the next span
8070                    // is within the current span, so pick the larger of
8071                    // the end values to extend the current span.
8072                    // Then delete the next span and set next to the span after it.
8073                    current->end = max(current->end, next->end);
8074                    next = spans.erase(next);
8075                }
8076            }
8077
8078            // Now we can check if the incoming range is within any of the spans.
8079            bool contained_in_a_range = false;
8080            for (uint32_t i = 0; i < spans.size(); ++i) {
8081                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
8082                    contained_in_a_range = true;
8083                    break;
8084                }
8085            }
8086            if (!contained_in_a_range) {
8087                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8088                                    __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8089                                    "vkCmdPushConstants() Push constant range [%d, %d) "
8090                                    "with stageFlags = 0x%" PRIx32 " "
8091                                    "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
8092                                    offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
8093            }
8094        }
8095    }
8096    lock.unlock();
8097    if (!skipCall)
8098        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8099}
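
// A standalone distillation of the coalesce-then-contain check above; the helper name
// and the {start, end} pair encoding are illustrative only.
typedef std::pair<uint32_t, uint32_t> range_span; // {start, end}, half-open
static bool range_contained_in_spans(std::vector<range_span> spans, uint32_t offset, uint32_t size) {
    std::sort(spans.begin(), spans.end());
    // Merge overlapping or adjacent spans in place.
    size_t last = 0;
    for (size_t i = 1; i < spans.size(); ++i) {
        if (spans[i].first <= spans[last].second) {
            spans[last].second = max(spans[last].second, spans[i].second);
        } else {
            spans[++last] = spans[i];
        }
    }
    if (!spans.empty())
        spans.resize(last + 1);
    // The update passes if any single coalesced span contains it.
    for (const auto &s : spans) {
        if ((offset >= s.first) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)s.second))
            return true;
    }
    return false;
}
// Worked example: spans {0,16} and {8,32} coalesce to [0,32), so an update of [4,24)
// passes even though it fits neither source range alone, while spans {0,8} and {16,32}
// leave a gap at [8,16) and the same update fails.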
8100
8101VKAPI_ATTR void VKAPI_CALL
8102CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8103    bool skipCall = false;
8104    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8105    std::unique_lock<std::mutex> lock(global_lock);
8106    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8107    if (pCB) {
8108        QueryObject query = {queryPool, slot};
8109        pCB->queryToStateMap[query] = 1;
8110        if (pCB->state == CB_RECORDING) {
8111            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8112        } else {
8113            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8114        }
8115    }
8116    lock.unlock();
8117    if (!skipCall)
8118        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8119}
8120
8121VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8122                                                 const VkAllocationCallbacks *pAllocator,
8123                                                 VkFramebuffer *pFramebuffer) {
8124    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8125    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8126    if (VK_SUCCESS == result) {
8127        // Shadow create info and store in map
8128        std::lock_guard<std::mutex> lock(global_lock);
8129
8130        auto &fbNode = dev_data->frameBufferMap[*pFramebuffer];
8131        fbNode.createInfo = *pCreateInfo;
8132        if (pCreateInfo->pAttachments) {
8133            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
8134            memcpy(attachments,
8135                   pCreateInfo->pAttachments,
8136                   pCreateInfo->attachmentCount * sizeof(VkImageView));
8137            fbNode.createInfo.pAttachments = attachments;
8138        }
8139        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8140            VkImageView view = pCreateInfo->pAttachments[i];
8141            auto view_data = dev_data->imageViewMap.find(view);
8142            if (view_data == dev_data->imageViewMap.end()) {
8143                continue;
8144            }
8145            MT_FB_ATTACHMENT_INFO fb_info;
8146            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8147                                        &fb_info.mem);
8148            fb_info.image = view_data->second.image;
8149            fbNode.attachments.push_back(fb_info);
8150        }
8151    }
8152    return result;
8153}
8154
8155static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8156                           std::unordered_set<uint32_t> &processed_nodes) {
8157    // If this node has already been visited, no new dependency path will be found through it, so return false.
8158    if (processed_nodes.count(index))
8159        return false;
8160    processed_nodes.insert(index);
8161    const DAGNode &node = subpass_to_node[index];
8162    // Look for a direct dependency. If one exists return true; otherwise recurse on the previous nodes.
8163    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8164        for (auto elem : node.prev) {
8165            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8166                return true;
8167        }
8168    } else {
8169        return true;
8170    }
8171    return false;
8172}
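
// FindDependency() is a depth-first reachability test over the subpasses' prev edges:
// it asks whether index can reach dependent by walking backwards, with processed_nodes
// preventing re-visits of shared ancestors (and guarding against cycles in a malformed
// dependency list). For example, with prev edges 2 -> {1} and 1 -> {0}, calling
// FindDependency(2, 0, ...) walks 2 -> 1 -> 0 and returns true, so subpasses 0 and 2
// are at least implicitly ordered.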
8173
8174static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8175                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8176    bool result = true;
8177    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8178    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8179        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8180            continue;
8181        const DAGNode &node = subpass_to_node[subpass];
8182        // Check for a specified dependency between the two nodes. If one exists we are done.
8183        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8184        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8185        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8186            // If no explicit dependency exists, an implicit one still might. If so, warn; if not, flag an error.
8187            std::unordered_set<uint32_t> processed_nodes;
8188            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8189                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
8190                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8191                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8192                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
8193                                     subpass, dependent_subpasses[k]);
8194            } else {
8195                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8196                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8197                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8198                                     dependent_subpasses[k]);
8199                result = false;
8200            }
8201        }
8202    }
8203    return result;
8204}
8205
8206static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8207                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8208    const DAGNode &node = subpass_to_node[index];
8209    // If this node writes to the attachment, return true, as subsequent nodes need to preserve it.
8210    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8211    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8212        if (attachment == subpass.pColorAttachments[j].attachment)
8213            return true;
8214    }
8215    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8216        if (attachment == subpass.pDepthStencilAttachment->attachment)
8217            return true;
8218    }
8219    bool result = false;
8220    // Loop through previous nodes and see if any of them write to the attachment.
8221    for (auto elem : node.prev) {
8222        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8223    }
8224    // If the attachment was written to by a previous node, then this node needs to preserve it.
8225    if (result && depth > 0) {
8226        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8227        bool has_preserved = false;
8228        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8229            if (subpass.pPreserveAttachments[j] == attachment) {
8230                has_preserved = true;
8231                break;
8232            }
8233        }
8234        if (!has_preserved) {
8235            skip_call |=
8236                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8237                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8238                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8239        }
8240    }
8241    return result;
8242}
8243
8244// True iff half-open ranges [offset1, offset1+size1) and [offset2, offset2+size2) share any element (containment included).
8245template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8246    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
8247}
8248
8249bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8250    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8251            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8252}
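
// Examples for the predicates above: isRangeOverlapping(0u, 10u, 5u, 10u) is true,
// since [0,10) and [5,15) share [5,10); isRangeOverlapping(0u, 20u, 5u, 10u) is also
// true, with the first range fully containing the second; and
// isRangeOverlapping(0u, 10u, 10u, 5u) is false, since [0,10) and [10,15) merely touch.
// isRegionOverlapping() requires both the mip-level ranges and the array-layer ranges
// to overlap, because subresource ranges that are disjoint in either dimension name
// disjoint sets of subresources.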
8253
8254static bool ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
8255                                 const std::vector<DAGNode> &subpass_to_node) {
8256    bool skip_call = false;
8257    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
8258    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
8259    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8260    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8261    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8262    // Find overlapping attachments
8263    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8264        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8265            VkImageView viewi = pFramebufferInfo->pAttachments[i];
8266            VkImageView viewj = pFramebufferInfo->pAttachments[j];
8267            if (viewi == viewj) {
8268                overlapping_attachments[i].push_back(j);
8269                overlapping_attachments[j].push_back(i);
8270                continue;
8271            }
8272            auto view_data_i = my_data->imageViewMap.find(viewi);
8273            auto view_data_j = my_data->imageViewMap.find(viewj);
8274            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
8275                continue;
8276            }
8277            if (view_data_i->second.image == view_data_j->second.image &&
8278                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
8279                overlapping_attachments[i].push_back(j);
8280                overlapping_attachments[j].push_back(i);
8281                continue;
8282            }
8283            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
8284            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
8285            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
8286                continue;
8287            }
8288            if (image_data_i->second.mem == image_data_j->second.mem &&
8289                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
8290                                   image_data_j->second.memSize)) {
8291                overlapping_attachments[i].push_back(j);
8292                overlapping_attachments[j].push_back(i);
8293            }
8294        }
8295    }
8296    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8297        uint32_t attachment = i;
8298        for (auto other_attachment : overlapping_attachments[i]) {
8299            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8300                skip_call |=
8301                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8302                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8303                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8304                            attachment, other_attachment);
8305            }
8306            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8307                skip_call |=
8308                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8309                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8310                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8311                            other_attachment, attachment);
8312            }
8313        }
8314    }
8315    // For each attachment, find the subpasses that use it.
8316    unordered_set<uint32_t> attachmentIndices;
8317    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8318        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8319        attachmentIndices.clear();
8320        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8321            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8322            input_attachment_to_subpass[attachment].push_back(i);
8323            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8324                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8325            }
8326        }
8327        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8328            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8329            output_attachment_to_subpass[attachment].push_back(i);
8330            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8331                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8332            }
8333            attachmentIndices.insert(attachment);
8334        }
8335        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8336            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8337            output_attachment_to_subpass[attachment].push_back(i);
8338            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8339                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8340            }
8341
8342            if (attachmentIndices.count(attachment)) {
8343                skip_call |=
8344                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8345                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8346                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8347                            attachment, i);
8348            }
8349        }
8350    }
8351    // If a dependency is needed, make sure one exists
8352    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8353        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8354        // If the attachment is an input then all subpasses that output must have a dependency relationship
8355        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8356            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
8357            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8358        }
8359        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8360        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8361            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
8362            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8363            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8364        }
8365        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8366            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8367            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8368            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8369        }
8370    }
8371    // Walk the implicit dependencies: if a subpass reads an attachment, make sure that attachment is preserved by every
8372    // subpass between the one that wrote it and this one.
8373    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8374        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8375        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8376            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
8377        }
8378    }
8379    return skip_call;
8380}
8381// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
8382// VkAttachmentDescription structs used by the subpasses of a renderpass. The initial check makes sure that
8383// attachments whose first use is in a READ_ONLY layout don't have CLEAR as their loadOp.
8384static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
8385                                                  const uint32_t attachment,
8386                                                  const VkAttachmentDescription &attachment_description) {
8387    bool skip_call = false;
8388    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
8389    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
8390        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
8391            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
8392            skip_call |=
8393                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8394                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8395                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
8396        }
8397    }
8398    return skip_call;
8399}
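
// An attachment setup the check above rejects (illustrative values only): the
// attachment's first use is in a read-only layout, yet its contents are cleared on load.
//
//     VkAttachmentDescription desc = {};
//     desc.format = VK_FORMAT_D24_UNORM_S8_UINT;
//     desc.samples = VK_SAMPLE_COUNT_1_BIT;
//     desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     // first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL
//     // => DRAWSTATE_INVALID_IMAGE_LAYOUT: the cleared contents could never be
//     //    written in that layout, so the clear is at best pointless.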
8400
8401static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
8402    bool skip = false;
8403
8404    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8405        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8406        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8407            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
8408                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
8409                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8410                    // TODO: Verify Valid Usage in the spec. This is believed to be allowed (valid) but may not give optimal performance.
8411                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8412                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8413                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
8414                } else {
8415                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8416                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8417                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
8418                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
8419                }
8420            }
8421            auto attach_index = subpass.pInputAttachments[j].attachment;
8422            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index,
8423                                                          pCreateInfo->pAttachments[attach_index]);
8424        }
8425        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8426            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
8427                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8428                    // TODO: Verify Valid Usage in the spec. This is believed to be allowed (valid) but may not give optimal performance.
8429                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8430                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8431                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
8432                } else {
8433                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8434                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8435                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
8436                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
8437                }
8438            }
8439            auto attach_index = subpass.pColorAttachments[j].attachment;
8440            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index,
8441                                                          pCreateInfo->pAttachments[attach_index]);
8442        }
8443        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8444            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
8445                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
8446                    // TODO: Verify Valid Usage in the spec. This is believed to be allowed (valid) but may not give optimal performance.
8447                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8448                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8449                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8450                } else {
8451                    skip |=
8452                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8453                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8454                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8455                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8456                }
8457            }
8458            auto attach_index = subpass.pDepthStencilAttachment->attachment;
8459            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
8460                                                          attach_index, pCreateInfo->pAttachments[attach_index]);
8461        }
8462    }
8463    return skip;
8464}
8465
8466static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8467                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8468    bool skip_call = false;
8469    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8470        DAGNode &subpass_node = subpass_to_node[i];
8471        subpass_node.pass = i;
8472    }
8473    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8474        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8475        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8476            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8477            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8478                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
8479                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
8480        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8481            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8482                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8483        } else if (dependency.srcSubpass == dependency.dstSubpass) {
8484            has_self_dependency[dependency.srcSubpass] = true;
8485        }
8486        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8487            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8488        }
8489        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8490            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8491        }
8492    }
8493    return skip_call;
8494}
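
// Worked example of the DAG construction above: three subpasses with pDependencies
// {0 -> 1, 1 -> 2} produce
//     subpass_to_node[0] = {pass 0, prev {},  next {1}}
//     subpass_to_node[1] = {pass 1, prev {0}, next {2}}
//     subpass_to_node[2] = {pass 2, prev {1}, next {}}
// while a {srcSubpass 1, dstSubpass 1} dependency only sets has_self_dependency[1].
// VK_SUBPASS_EXTERNAL endpoints are deliberately kept out of the graph, since they do
// not order subpasses against each other.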
8495
8496
8497VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8498                                                  const VkAllocationCallbacks *pAllocator,
8499                                                  VkShaderModule *pShaderModule) {
8500    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8501    bool skip_call = false;
8502
8503    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
8504    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
8505    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
8506    spv_diagnostic diag = nullptr;
8507
8508    auto result = spvValidate(ctx, &binary, &diag);
8509    if (result != SPV_SUCCESS) {
8510        skip_call |= log_msg(my_data->report_data,
8511                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
8512                             VkDebugReportObjectTypeEXT(0), 0,
8513                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
8514                             diag && diag->error ? diag->error : "(no error text)");
8515    }
8516
8517    spvDiagnosticDestroy(diag);
8518    spvContextDestroy(ctx);
8519
8520    if (skip_call)
8521        return VK_ERROR_VALIDATION_FAILED_EXT;
8522
8523    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8524
8525    if (res == VK_SUCCESS) {
8526        std::lock_guard<std::mutex> lock(global_lock);
8527        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
8528    }
8529    return res;
8530}
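
// The SPIRV-Tools calls above also work standalone. A minimal sketch of the same
// validation flow (validate_spirv_words is an illustrative helper, not part of this layer):
static bool validate_spirv_words(const uint32_t *words, size_t word_count, std::string *error_out) {
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
    spv_const_binary_t binary{words, word_count};
    spv_diagnostic diag = nullptr;
    bool ok = (spvValidate(ctx, &binary, &diag) == SPV_SUCCESS);
    if (!ok && error_out)
        *error_out = (diag && diag->error) ? diag->error : "(no error text)";
    spvDiagnosticDestroy(diag); // accepts nullptr
    spvContextDestroy(ctx);
    return ok;
}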
8531
8532VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8533                                                const VkAllocationCallbacks *pAllocator,
8534                                                VkRenderPass *pRenderPass) {
8535    bool skip_call = false;
8536    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8537    // Create DAG
8538    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8539    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8540    {
8541        std::lock_guard<std::mutex> lock(global_lock);
8542        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
8543        // Validate
8544        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
8545        if (skip_call) {
8546            return VK_ERROR_VALIDATION_FAILED_EXT;
8547        }
8548    }
8549    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8550    if (VK_SUCCESS == result) {
8551        // TODOSC : Merge in tracking of renderpass from shader_checker
8552        // Shadow create info and store in map
8553        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
8554        if (pCreateInfo->pAttachments) {
8555            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
8556            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
8557                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
8558        }
8559        if (pCreateInfo->pSubpasses) {
8560            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
8561            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
8562
8563            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
8564                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
8565                const uint32_t attachmentCount = subpass->inputAttachmentCount +
8566                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
8567                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
8568                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
8569
8570                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
8571                subpass->pInputAttachments = attachments;
8572                attachments += subpass->inputAttachmentCount;
8573
8574                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8575                subpass->pColorAttachments = attachments;
8576                attachments += subpass->colorAttachmentCount;
8577
8578                if (subpass->pResolveAttachments) {
8579                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
8580                    subpass->pResolveAttachments = attachments;
8581                    attachments += subpass->colorAttachmentCount;
8582                }
8583
8584                if (subpass->pDepthStencilAttachment) {
8585                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
8586                    subpass->pDepthStencilAttachment = attachments;
8587                    attachments += 1;
8588                }
8589
8590                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
8591                subpass->pPreserveAttachments = &attachments->attachment;
8592            }
8593        }
8594        if (pCreateInfo->pDependencies) {
8595            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
8596            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
8597                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
8598        }
8599
8600        auto render_pass = new RENDER_PASS_NODE(localRPCI);
8601        render_pass->renderPass = *pRenderPass;
8602        render_pass->hasSelfDependency = has_self_dependency;
8603        render_pass->subpassToNode = subpass_to_node;
8604#if MTMERGESOURCE
8605        // MTMTODO : Merge with code from above to eliminate duplication
8606        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8607            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
8608            MT_PASS_ATTACHMENT_INFO pass_info;
8609            pass_info.load_op = desc.loadOp;
8610            pass_info.store_op = desc.storeOp;
8611            pass_info.attachment = i;
8612            render_pass->attachments.push_back(pass_info);
8613        }
8614        // TODO: Maybe fill list and then copy instead of locking
8615        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
8616        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
8617            render_pass->attachment_first_layout;
8618        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8619            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8620            if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8621                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8622                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8623                                     "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
8624            }
8625            for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8626                uint32_t attachment = subpass.pPreserveAttachments[j];
8627                if (attachment >= pCreateInfo->attachmentCount) {
8628                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8629                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8630                                         "Preserve attachment %d must be less than the total number of attachments %d.",
8631                                         attachment, pCreateInfo->attachmentCount);
8632                }
8633            }
8634            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8635                uint32_t attachment;
8636                if (subpass.pResolveAttachments) {
8637                    attachment = subpass.pResolveAttachments[j].attachment;
8638                    if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) {
8639                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8640                                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8641                                             "Color attachment %d must be less than the total number of attachments %d.",
8642                                             attachment, pCreateInfo->attachmentCount);
8643                        continue;
8644                    }
8645                }
8646                attachment = subpass.pColorAttachments[j].attachment;
8647                if (attachment >= pCreateInfo->attachmentCount) {
8648                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8649                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8650                                         "Color attachment %d must be less than the total number of attachments %d.",
8651                                         attachment, pCreateInfo->attachmentCount);
8652                    continue;
8653                }
8654                if (attachment_first_read.count(attachment))
8655                    continue;
8656                attachment_first_read.insert(std::make_pair(attachment, false));
8657                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
8658            }
8659            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8660                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8661                if (attachment >= pCreateInfo->attachmentCount) {
8662                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8663                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8664                                         "Depth stencil attachment %d must be less than the total number of attachments %d.",
8665                                         attachment, pCreateInfo->attachmentCount);
8666                    continue;
8667                }
8668                if (attachment_first_read.count(attachment))
8669                    continue;
8670                attachment_first_read.insert(std::make_pair(attachment, false));
8671                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
8672            }
8673            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8674                uint32_t attachment = subpass.pInputAttachments[j].attachment;
8675                if (attachment >= pCreateInfo->attachmentCount) {
8676                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8677                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8678                                         "Input attachment %d must be less than the total number of attachments %d.",
8679                                         attachment, pCreateInfo->attachmentCount);
8680                    continue;
8681                }
8682                if (attachment_first_read.count(attachment))
8683                    continue;
8684                attachment_first_read.insert(std::make_pair(attachment, true));
8685                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
8686            }
8687        }
8688#endif
8689        {
8690            std::lock_guard<std::mutex> lock(global_lock);
8691            dev_data->renderPassMap[*pRenderPass] = render_pass;
8692        }
8693    }
8694    return result;
8695}
8696// Free the renderpass shadow
8697static void deleteRenderPasses(layer_data *my_data) {
8698    if (my_data->renderPassMap.size() <= 0)
8699    if (my_data->renderPassMap.empty())
8700    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
8701        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
8702        delete[] pRenderPassInfo->pAttachments;
8703        if (pRenderPassInfo->pSubpasses) {
8704            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
8705                // Attachments are all allocated in one block, so we just need to
8706                //  find the first non-null pointer to delete
8707                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
8708                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
8709                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
8710                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
8711                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
8712                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
8713                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
8714                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
8715                }
8716            }
8717            delete[] pRenderPassInfo->pSubpasses;
8718        }
8719        delete[] pRenderPassInfo->pDependencies;
8720        delete pRenderPassInfo;
8721        delete (*ii).second;
8722    }
8723    my_data->renderPassMap.clear();
8724}
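
// Note on the pairing with CreateRenderPass() above: each shadowed subpass gets a
// single new[] block of VkAttachmentReference, carved up in order as
//
//     [input refs][color refs][resolve refs][depth/stencil ref][preserve indices]
//
// with pInputAttachments always pointing at the start of the block (even when
// inputAttachmentCount is 0). Freeing that first pointer therefore releases the whole
// allocation, which is why the chain above only deletes the first non-null array it finds.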
8725
8726static bool VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
8727    bool skip_call = false;
8728    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8729    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8730    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
8731    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
8732    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
8733        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8734                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
8735                                                                 "with a different number of attachments.");
8736    }
8737    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
8738        const VkImageView &image_view = framebufferInfo.pAttachments[i];
8739        auto image_data = dev_data->imageViewMap.find(image_view);
8740        assert(image_data != dev_data->imageViewMap.end());
8741        const VkImage &image = image_data->second.image;
8742        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
8743        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
8744                                             pRenderPassInfo->pAttachments[i].initialLayout};
8745        // TODO: Do not iterate over every possibility - consolidate where possible
8746        for (uint32_t j = 0; j < subRange.levelCount; j++) {
8747            uint32_t level = subRange.baseMipLevel + j;
8748            for (uint32_t k = 0; k < subRange.layerCount; k++) {
8749                uint32_t layer = subRange.baseArrayLayer + k;
8750                VkImageSubresource sub = {subRange.aspectMask, level, layer};
8751                IMAGE_CMD_BUF_LAYOUT_NODE node;
8752                if (!FindLayout(pCB, image, sub, node)) {
8753                    SetLayout(pCB, image, sub, newNode);
8754                    continue;
8755                }
8756                if (newNode.layout != node.layout) {
8757                    skip_call |=
8758                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8759                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
8760                                                                    "where the "
8761                                                                    "initial layout is %s and the layout of the attachment at the "
8762                                                                    "start of the render pass is %s. The layouts must match.",
8763                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
8764                }
8765            }
8766        }
8767    }
8768    return skip_call;
8769}
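
// The layout tracking above is per-subresource: the (mip level, array layer) grid of
// each attachment view's subresource range is walked, and every (image, level, layer)
// cell is compared against the render pass's initialLayout. A view with 2 levels and
// 3 layers therefore produces six FindLayout()/SetLayout() probes; the first time a
// subresource is seen in this command buffer, its layout is simply recorded rather
// than validated.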
8770
8771static void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
8772                                     const int subpass_index) {
8773    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8774    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8775    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
8776    if (render_pass_data == dev_data->renderPassMap.end()) {
8777        return;
8778    }
8779    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
8780    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
8781    if (framebuffer_data == dev_data->frameBufferMap.end()) {
8782        return;
8783    }
8784    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
8785    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
8786    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8787        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
8788        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
8789    }
8790    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8791        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
8792        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
8793    }
8794    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8795        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
8796        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
8797    }
8798}
8799
8800static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
8801    bool skip_call = false;
8802    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
8803        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8804                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
8805                             cmd_name.c_str());
8806    }
8807    return skip_call;
8808}
8809
static void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
    if (render_pass_data == dev_data->renderPassMap.end()) {
        return;
    }
    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
    if (framebuffer_data == dev_data->frameBufferMap.end()) {
        return;
    }
    const VkFramebufferCreateInfo &framebufferInfo = framebuffer_data->second.createInfo;
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

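// Check that pRenderPassBegin->renderArea lies entirely inside the framebuffer.
// As a hypothetical example, with a 64x64 framebuffer a renderArea of
// { offset = {32, 32}, extent = {64, 64} } is rejected because 32 + 64 > 64 on
// both axes.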
static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}

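// vkCmdBeginRenderPass: validates the begin info (render area, attachment layouts,
// render pass nesting, primary-level buffer), queues deferred memory-validity
// checks keyed off each attachment's loadOp, snapshots the begin info into the CB
// state, and only then calls down the dispatch chain.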
VKAPI_ATTR void VKAPI_CALL
CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
#if MTMERGESOURCE
            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (pass_data != dev_data->renderPassMap.end()) {
                RENDER_PASS_NODE* pRPNode = pass_data->second;
                pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].attachments[i];
                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                                return false;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                                return false;
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
                        if (cb_data != dev_data->commandBufferMap.end()) {
                            std::function<bool()> function = [=]() {
                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
                            };
                            cb_data->second->validate_functions.push_back(function);
                        }
                    }
                }
            }
#endif
            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
            if (render_pass_data != dev_data->renderPassMap.end()) {
                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
                pCB->activeRenderPass = render_pass_data->second;
            } else {
                pCB->activeRenderPass = nullptr;
            }
            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
            // This is a shallow copy as that is all that is needed for now
            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
            pCB->activeSubpass = 0;
            pCB->activeSubpassContents = contents;
            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
            // Connect this framebuffer to this cmdBuffer
            dev_data->frameBufferMap[pRenderPassBegin->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
        } else {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
        }
    }
    lock.unlock();
    if (!skipCall) {
        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    }
}

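// vkCmdNextSubpass: advances the CB's active subpass and records the layout
// transitions implied by the new subpass before calling down.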
VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
}

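// vkCmdEndRenderPass: queues deferred memory-validity updates keyed off each
// attachment's storeOp, records the finalLayout transitions, and clears the CB's
// active render pass / subpass / framebuffer state.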
VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
    if (cb_data != dev_data->commandBufferMap.end()) {
        RENDER_PASS_NODE* pRPNode = cb_data->second->activeRenderPass;
        if (pRPNode) {
            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[cb_data->second->activeFramebuffer].attachments[i];
                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<bool()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
                            return false;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
                    if (cb_data != dev_data->commandBufferMap.end()) {
                        std::function<bool()> function = [=]() {
                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
                            return false;
                        };
                        cb_data->second->validate_functions.push_back(function);
                    }
                }
            }
        }
        skipCall |= outsideRenderPass(dev_data, cb_data->second, "vkCmdEndRenderPass");
        skipCall |= validatePrimaryCommandBuffer(dev_data, cb_data->second, "vkCmdEndRenderPass");
        skipCall |= addCmd(dev_data, cb_data->second, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
        TransitionFinalSubpassLayouts(commandBuffer, &cb_data->second->activeRenderPassBeginInfo);
        cb_data->second->activeRenderPass = nullptr;
        cb_data->second->activeSubpass = 0;
        cb_data->second->activeFramebuffer = VK_NULL_HANDLE;
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
}

static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
                                        RENDER_PASS_NODE const *primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
                                        const char *msg) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
                   " that is not compatible with the current render pass 0x%" PRIx64 ". "
                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
                   (void *)secondaryBuffer, (uint64_t)(secondaryPass->renderPass), (uint64_t)(primaryPass->renderPass), primaryAttach, secondaryAttach,
                   msg);
}

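// Compare a single attachment reference from two render passes under the render
// pass compatibility rules: out-of-range references are treated as
// VK_ATTACHMENT_UNUSED, both-unused is trivially compatible, and otherwise the
// attachments must agree on format and sample count (and on flags when the
// passes have more than one subpass).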
static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
                                            uint32_t secondaryAttach, bool is_multi) {
    bool skip_call = false;
    if (primaryPass->pCreateInfo->attachmentCount <= primaryAttach) {
        primaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (secondaryPass->pCreateInfo->attachmentCount <= secondaryAttach) {
        secondaryAttach = VK_ATTACHMENT_UNUSED;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
        return skip_call;
    }
    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The first is unused while the second is not.");
        return skip_call;
    }
    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "The second is unused while the first is not.");
        return skip_call;
    }
    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].format !=
        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].format) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different formats.");
    }
    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].samples !=
        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].samples) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different samples.");
    }
    if (is_multi &&
        primaryPass->pCreateInfo->pAttachments[primaryAttach].flags !=
            secondaryPass->pCreateInfo->pAttachments[secondaryAttach].flags) {
        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
                                                 secondaryAttach, "They have different flags.");
    }
    return skip_call;
}

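// Apply validateAttachmentCompatibility() position-by-position across the input,
// color, resolve, and depth/stencil attachment references of one subpass from
// each render pass; a missing reference on either side is treated as
// VK_ATTACHMENT_UNUSED.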
static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
                                         VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass, const int subpass,
                                         bool is_multi) {
    bool skip_call = false;
    const VkSubpassDescription &primary_desc = primaryPass->pCreateInfo->pSubpasses[subpass];
    const VkSubpassDescription &secondary_desc = secondaryPass->pCreateInfo->pSubpasses[subpass];
    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.inputAttachmentCount) {
            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
        }
        if (i < secondary_desc.inputAttachmentCount) {
            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
                                                     secondaryPass, secondary_input_attach, is_multi);
    }
    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount) {
            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount) {
            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
                                                     secondaryPass, secondary_color_attach, is_multi);
        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
        }
        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
        }
        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
                                                     secondaryPass, secondary_resolve_attach, is_multi);
    }
    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
    if (primary_desc.pDepthStencilAttachment) {
        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
    }
    if (secondary_desc.pDepthStencilAttachment) {
        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
    }
    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
    return skip_call;
}

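// Check two render passes for compatibility as required by vkCmdExecuteCommands:
// identical handles are trivially compatible, both handles must be known and
// valid, the subpass counts must match, and each corresponding subpass pair must
// pass validateSubpassCompatibility().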
static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
    bool skip_call = false;
    // Early exit if renderPass objects are identical (and therefore compatible)
    if (primaryPass == secondaryPass)
        return skip_call;
    auto primary_data = dev_data->renderPassMap.find(primaryPass);
    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
                    (void *)primaryBuffer, (uint64_t)(primaryPass));
        return skip_call;
    }
    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
        return skip_call;
    }
    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
                             " that is not compatible with the current render pass 0x%" PRIx64 ". "
                             "They have a different number of subpasses.",
                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
        return skip_call;
    }
    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
        skip_call |=
            validateSubpassCompatibility(dev_data, primaryBuffer, primary_data->second, secondaryBuffer, secondary_data->second, i, is_multi);
    }
    return skip_call;
}

static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
    bool skip_call = false;
    if (!pSubCB->beginInfo.pInheritanceInfo) {
        return skip_call;
    }
    VkFramebuffer primary_fb = pCB->activeFramebuffer;
    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
    if (secondary_fb != VK_NULL_HANDLE) {
        if (primary_fb != secondary_fb) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64
                                 " that is not compatible with the current framebuffer 0x%" PRIx64 ".",
                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
        }
        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
        if (fb_data == dev_data->frameBufferMap.end()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
            return skip_call;
        }
        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
    }
    return skip_call;
}

static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
    bool skipCall = false;
    unordered_set<int> activeTypes;
    for (auto queryObject : pCB->activeQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end()) {
            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
                pSubCB->beginInfo.pInheritanceInfo) {
                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 ". A pipeline statistics query is active, so the "
                        "secondary command buffer's inherited pipelineStatistics must be a subset of the query pool's.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
                }
            }
            activeTypes.insert(queryPoolData->second.createInfo.queryType);
        }
    }
    for (auto queryObject : pSubCB->startedQueries) {
        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d, but a query of that type has been started on "
                        "secondary Cmd Buffer 0x%p.",
                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
        }
    }
    return skipCall;
}

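// vkCmdExecuteCommands: validates each element of pCommandBuffers (must be a known
// secondary buffer; inside a render pass it must have been begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and a compatible render pass /
// framebuffer), checks query and simultaneous-use interactions with the primary
// buffer, and links the secondaries to the primary before calling down.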
VKAPI_ATTR void VKAPI_CALL
CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
    bool skipCall = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        GLOBAL_CB_NODE *pSubCB = NULL;
        for (uint32_t i = 0; i < commandBuffersCount; i++) {
            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
            if (!pSubCB) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
                            (void *)pCommandBuffers[i], i);
                continue; // Cannot validate further without a valid CB node
            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
                                    (void *)pCommandBuffers[i], i);
            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
                } else {
                    // Make sure render pass is compatible with parent command buffer pass if it has the continue bit set
                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->renderPass, pCommandBuffers[i],
                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
                }
                string errorString = "";
                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->renderPass,
                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
                }
                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
                        skipCall |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64
                            ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").",
                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass);
                    }
                }
            }
            // TODO(mlentine): Move more logic into this method
            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
            skipCall |= validateCommandBufferState(dev_data, pSubCB);
            // Secondary cmdBuffers are considered pending execution starting w/
            // being recorded
            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                        "set!",
                        (uint64_t)(pCB->commandBuffer));
                }
                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
                    skipCall |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
                                          "set, even though it does.",
                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
                }
            }
            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "vkCmdExecuteCommands(): Secondary Command Buffer "
                            "(0x%" PRIxLEAST64 ") cannot be executed with a query in "
                            "flight when inherited queries are not "
                            "supported on this device.",
                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
            }
            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
        }
        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
}

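// vkMapMemory helper: memory bound to an image may only be mapped while every
// tracked subresource is in VK_IMAGE_LAYOUT_PREINITIALIZED or
// VK_IMAGE_LAYOUT_GENERAL.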
static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    auto mem_data = dev_data->memObjMap.find(mem);
    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
        std::vector<VkImageLayout> layouts;
        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
            for (auto layout : layouts) {
                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
                                                                                         "GENERAL or PREINITIALIZED are supported.",
                                         string_VkImageLayout(layout));
                }
            }
        }
    }
    return skip_call;
}

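// vkMapMemory: verifies the memory type is host-visible, that the requested range
// fits the allocation, and that any image bound to it is in a mappable layout; on
// success the mapped range is recorded for later flush/unmap validation.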
VKAPI_ATTR VkResult VKAPI_CALL
MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
#if MTMERGESOURCE
    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
    if (pMemObj) {
        pMemObj->valid = true;
        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call =
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
    }
    skip_call |= validateMemRange(dev_data, mem, offset, size);
#endif
    skip_call |= ValidateMapImageLayouts(device, mem);
    lock.unlock();

    if (!skip_call) {
        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
        if (VK_SUCCESS == result) {
#if MTMERGESOURCE
            lock.lock();
            storeMemRanges(dev_data, mem, offset, size);
            initializeAndTrackMemory(dev_data, mem, size, ppData);
            lock.unlock();
#endif
        }
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= deleteMemRanges(my_data, mem);
    lock.unlock();
    if (!skipCall) {
        my_data->device_dispatch_table->UnmapMemory(device, mem);
    }
}

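// Check each VkMappedMemoryRange against the range recorded at vkMapMemory time:
// the flushed/invalidated range must start at or after the mapped offset and,
// unless the mapping used VK_WHOLE_SIZE, must not extend past the mapped upper
// bound. As a hypothetical example, mapping offset 256 size 512 and then flushing
// offset 128 size 64 trips the first check.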
static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
                                   const VkMappedMemoryRange *pMemRanges) {
    bool skipCall = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
                skipCall |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
                    "(" PRINTF_SIZE_T_SPECIFIER ").",
                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
            }
            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
                 (pMemRanges[i].offset + pMemRanges[i].size))) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
                                                                 ") exceeds the Memory Object's upper-bound "
                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
            }
        }
    }
    return skipCall;
}

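// Guard-band check for non-coherent mappings. The layer appears to hand the
// application a shadow allocation (set up in initializeAndTrackMemory(), not shown
// here) with half_size bytes of NoncoherentMemoryFillValue padding on either side
// of the usable region; any overwritten fill byte is reported as an overflow, and
// the usable middle region is then copied back to the driver's pointer.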
static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
                                                     const VkMappedMemoryRange *pMemRanges) {
    bool skipCall = false;
    for (uint32_t i = 0; i < memRangeCount; ++i) {
        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
        if (mem_element != my_data->memObjMap.end()) {
            if (mem_element->second.pData) {
                VkDeviceSize size = mem_element->second.memRange.size;
                VkDeviceSize half_size = (size / 2);
                char *data = static_cast<char *>(mem_element->second.pData);
                for (VkDeviceSize j = 0; j < half_size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                for (VkDeviceSize j = size + half_size; j < 2 * size; ++j) {
                    if (data[j] != NoncoherentMemoryFillValue) {
                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
                                            (uint64_t)pMemRanges[i].memory);
                    }
                }
                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
            }
        }
    }
    return skipCall;
}

VKAPI_ATTR VkResult VKAPI_CALL
FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skipCall) {
        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
    lock.unlock();
    if (!skipCall) {
        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
    }
    return result;
}

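// vkBindImageMemory: records the image-to-memory binding, queries the image's
// memory requirements (dropping the lock around the down-chain call), and
// validates the new image range against buffer ranges already bound to the same
// allocation (an image/buffer aliasing check) before binding.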
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        // Track objects tied to memory
        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
        VkMemoryRequirements memRequirements;
        lock.unlock();
        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
        lock.lock();

        // Track and validate bound memory range information
        const auto &memEntry = dev_data->memObjMap.find(mem);
        if (memEntry != dev_data->memObjMap.end()) {
            const MEMORY_RANGE range =
                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, memEntry->second.imageRanges);
            skipCall |=
                validate_memory_range(dev_data, memEntry->second.bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
        }

        print_mem_list(dev_data);
        lock.unlock();
        if (!skipCall) {
            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
            lock.lock();
            dev_data->memObjMap[mem].image = image;
            image_node->second.mem = mem;
            image_node->second.memOffset = memoryOffset;
            image_node->second.memSize = memRequirements.size;
            lock.unlock();
        }
    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been deleted?",
                reinterpret_cast<const uint64_t &>(image));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
    bool skip_call = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_node = dev_data->eventMap.find(event);
    if (event_node != dev_data->eventMap.end()) {
        event_node->second.needsSignaled = false;
        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
        if (event_node->second.in_use.load()) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
                                 reinterpret_cast<const uint64_t &>(event));
        }
    }
    lock.unlock();
    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
    for (auto queue_data : dev_data->queueMap) {
        auto event_entry = queue_data.second.eventToStageMap.find(event);
        if (event_entry != queue_data.second.eventToStageMap.end()) {
            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
        }
    }
    if (!skip_call)
        result = dev_data->device_dispatch_table->SetEvent(device, event);
    return result;
}

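// vkQueueBindSparse: validates the fence state, records the sparse memory
// bindings for buffers and images, and enforces the usual semaphore wait/signal
// pairing rules before calling down.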
VKAPI_ATTR VkResult VKAPI_CALL
QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    // First verify that fence is not in use
    if (fence != VK_NULL_HANDLE) {
        trackCommandBuffers(dev_data, queue, 0, nullptr, fence);
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data != dev_data->fenceMap.end()) {
            if ((bindInfoCount != 0) && fence_data->second.in_use.load()) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            reinterpret_cast<uint64_t &>(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                            "Fence 0x%" PRIx64 " is already in use by another submission.", reinterpret_cast<uint64_t &>(fence));
            }
            if (!fence_data->second.needsSignaled) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                            "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
                            reinterpret_cast<uint64_t &>(fence));
            }
        }
    }
    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
        // Track objects tied to memory
        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                           "vkQueueBindSparse"))
                    skip_call = true;
            }
        }
        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
                                " that has no way to be signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                ", but that semaphore is already signaled.",
                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
                dev_data->semaphoreMap[semaphore].signaled = true;
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();

    if (!skip_call)
        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
        sNode->signaled = false;
        sNode->queue = VK_NULL_HANDLE;
        sNode->in_use.store(0);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->eventMap[*pEvent].needsSignaled = false;
        dev_data->eventMap[*pEvent].in_use.store(0);
        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator,
                                                  VkSwapchainKHR *pSwapchain) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);

    if (VK_SUCCESS == result) {
        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
    }

    return result;
}

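// vkDestroySwapchainKHR: scrubs the layer's per-image tracking (layout maps,
// object bindings, image map entries) for every image that belonged to the
// swapchain before destroying the swapchain node and calling down.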
VKAPI_ATTR void VKAPI_CALL
DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
        if (swapchain_data->second->images.size() > 0) {
            for (auto swapchain_image : swapchain_data->second->images) {
                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
                if (image_sub != dev_data->imageSubresourceMap.end()) {
                    for (auto imgsubpair : image_sub->second) {
                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
                        if (image_item != dev_data->imageLayoutMap.end()) {
                            dev_data->imageLayoutMap.erase(image_item);
                        }
                    }
                    dev_data->imageSubresourceMap.erase(image_sub);
                }
                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
                dev_data->imageMap.erase(swapchain_image);
            }
        }
        delete swapchain_data->second;
        dev_data->device_extensions.swapchainMap.erase(swapchain);
    }
    lock.unlock();
    if (!skipCall)
        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
}

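// GetSwapchainImagesKHR is where WSI images enter the tracker. These images
// never pass through vkCreateImage, so an IMAGE_NODE is synthesized from the
// swapchain's create info and its memory is tied to the sentinel
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY. The usual two-call pattern that drives
// this (illustrative only):
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());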
VKAPI_ATTR VkResult VKAPI_CALL
GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
        // This should never happen and is checked by param checker.
        if (!pCount)
            return result;
        std::lock_guard<std::mutex> lock(global_lock);
        const size_t count = *pCount;
        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
        if (!swapchain_node->images.empty()) {
            // TODO : Not sure I like the memcmp here, but it works
            const bool mismatch = (swapchain_node->images.size() != count ||
                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
            if (mismatch) {
                // TODO: Verify against Valid Usage section of extension
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64 ") returned mismatching data",
                        (uint64_t)(swapchain));
            }
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
            IMAGE_LAYOUT_NODE image_layout_node;
            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
            image_layout_node.format = swapchain_node->createInfo.imageFormat;
            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
            image_node.createInfo.mipLevels = 1;
            image_node.createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
            image_node.createInfo.usage = swapchain_node->createInfo.imageUsage;
            image_node.createInfo.format = swapchain_node->createInfo.imageFormat;
            image_node.createInfo.extent.width = swapchain_node->createInfo.imageExtent.width;
            image_node.createInfo.extent.height = swapchain_node->createInfo.imageExtent.height;
            image_node.createInfo.extent.depth = 1; // swapchain images are 2D, so depth is always 1
            image_node.createInfo.sharingMode = swapchain_node->createInfo.imageSharingMode;
            image_node.valid = false;
            image_node.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
            swapchain_node->images.push_back(pSwapchainImages[i]);
            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
            dev_data->imageLayoutMap[subpair] = image_layout_node;
            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
        }
    }
    return result;
}

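// QueuePresentKHR validation makes two checks: every wait semaphore must have
// a pending signal (which presenting consumes), and every presented image
// must already be in VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. With MTMERGESOURCE
// enabled, the image's backing memory is also verified.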
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skip_call = false;

    if (pPresentInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
                if (dev_data->semaphoreMap[semaphore].signaled) {
                    dev_data->semaphoreMap[semaphore].signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        VkDeviceMemory mem;
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
#if MTMERGESOURCE
                skip_call |=
                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
#endif
                vector<VkImageLayout> layouts;
                if (FindLayouts(dev_data, image, layouts)) {
                    for (auto layout : layouts) {
                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, but this image is in %s.",
                                        string_VkImageLayout(layout));
                        }
                    }
                }
            }
        }
    }

    if (!skip_call)
        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);

    return result;
}

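// AcquireNextImageKHR marks the given semaphore as signaled (reporting an
// error if it already was) and, when a fence is provided, records which
// swapchain that fence is associated with.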
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    bool skipCall = false;

    std::unique_lock<std::mutex> lock(global_lock);
    if (semaphore != VK_NULL_HANDLE &&
        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
        if (dev_data->semaphoreMap[semaphore].signaled) {
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
        }
        dev_data->semaphoreMap[semaphore].signaled = true;
    }
    auto fence_data = dev_data->fenceMap.find(fence);
    if (fence_data != dev_data->fenceMap.end()) {
        fence_data->second.swapchain = swapchain;
    }
    lock.unlock();

    if (!skipCall) {
        result =
            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
    }

    return result;
}

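// Debug-report plumbing: the callback is first registered with the next
// layer or driver, then mirrored into this layer's report_data so messages
// generated here also reach the application's callback.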
VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
                                                         VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
                                                            pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                  const char *pLayerName, uint32_t *pCount,
                                                                  VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

static PFN_vkVoidFunction
intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction
intercept_core_device_command(const char *name);

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev);

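// GetDeviceProcAddr resolves names in three stages: the core device commands
// this layer intercepts, then the WSI commands (only when the swapchain
// extension is enabled on the device), and finally whatever the next layer
// or the driver exposes through the dispatch table.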
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (proc)
        return proc;

    assert(instance);

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
    if (proc)
        return proc;

    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    if (pTable->GetInstanceProcAddr == NULL)
        return NULL;
    return pTable->GetInstanceProcAddr(instance, funcName);
}

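// The intercept_* helpers below share one pattern: a static name-to-function
// table scanned linearly with strcmp. At these table sizes a linear scan is
// adequate, and a plain array keeps the list easy to diff against the
// functions defined above.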
static PFN_vkVoidFunction
intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    // we should never be queried for these commands
    assert(strcmp(name, "vkEnumerateInstanceLayerProperties") &&
           strcmp(name, "vkEnumerateInstanceExtensionProperties") &&
           strcmp(name, "vkEnumerateDeviceLayerProperties"));

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction
intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

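// WSI entry points are only handed out if the swapchain extension was
// actually enabled on the device; with a VK_NULL_HANDLE device (the
// GetInstanceProcAddr path) the table is searched unconditionally.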
static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };

    if (dev) {
        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0

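// These exported symbols form the v0 interface that the loader resolves with
// dlsym/GetProcAddress; each simply forwards into the core_validation
// namespace.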
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return util_GetExtensionProperties(1, core_validation::instance_extensions, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &core_validation::global_layer, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &core_validation::global_layer, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateInstanceLayerProperties);
    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateDeviceLayerProperties);
    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkEnumerateInstanceExtensionProperties);
    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
        return reinterpret_cast<PFN_vkVoidFunction>(vkGetInstanceProcAddr);

    return core_validation::GetInstanceProcAddr(instance, funcName);
}