core_validation.cpp revision 386d9a9a77f884789a7ae4c3890aecd47132f2ba
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <unordered_map>
#include <unordered_set>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) printf(__VA_ARGS__)
#endif

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);

// Track command pools and their command buffers
struct CMD_POOL_INFO {
    VkCommandPoolCreateFlags createFlags;
    uint32_t queueFamilyIndex;
    list<VkCommandBuffer> commandBuffers; // list container of cmd buffers allocated from this pool
};

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // all queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> sampleMap;
    unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
    unordered_map<VkImage, IMAGE_NODE> imageMap;
    unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
    unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, SET_NODE *> setMap;
    unordered_map<VkDescriptorSetLayout, DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, FRAMEBUFFER_NODE> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;

    layer_data()
        : report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr), device_extensions(),
          device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{} {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties cv_global_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], cv_global_layers[0].layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       cv_global_layers[0].layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() { return *it >> 16; }
    uint32_t opcode() { return *it & 0x0ffffu; }
    uint32_t const &word(unsigned n) { return it[n]; }
    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
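
/* Illustrative usage (this is exactly the pattern build_def_index() below uses):
 *
 *   for (auto insn : *module) {
 *       if (insn.opcode() == spv::OpEntryPoint) { ... }
 *   }
 *
 * len() is the high 16 bits and opcode() the low 16 bits of an instruction's
 * first word, per the SPIR-V physical layout. */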

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
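    /* note: words.begin() + 5 skips the five-word SPIR-V header
     * (magic, version, generator magic, bound, schema). */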
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
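
/* Typical get_def() use, as in describe_type_inner() further below: resolve a
 * result id to its defining instruction and inspect it:
 *
 *   auto insn = module->get_def(type_id);
 *   if (insn != module->end() && insn.opcode() == spv::OpTypePointer) { ... }
 */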

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;
#if MTMERGESOURCE
// MTMERGESOURCE - start of direct pull
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto it = my_data->imageMap.find(VkImage(handle));
        if (it != my_data->imageMap.end())
            return &(*it).second.mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto it = my_data->bufferMap.find(VkBuffer(handle));
        if (it != my_data->bufferMap.end())
            return &(*it).second.mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// MTMERGESOURCE - end section
#endif
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data *, const VkCommandBuffer);

#if MTMERGESOURCE
// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                     uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                     char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s %#" PRIxLEAST64
                                                               " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                           char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = dev_data->imageMap.find(image);
    if (image_node != dev_data->imageMap.end()) {
        skipCall = validate_usage_flags(dev_data, image_node->second.createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                            char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const buffer_node = dev_data->bufferMap.find(buffer);
    if (buffer_node != dev_data->bufferMap.end()) {
        skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
static DEVICE_MEM_INFO *get_mem_obj_info(layer_data *dev_data, const VkDeviceMemory mem) {
    auto item = dev_data->memObjMap.find(mem);
    if (item != dev_data->memObjMap.end()) {
        return &(*item).second;
    } else {
        return NULL;
    }
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    DEVICE_MEM_INFO &mem_info = my_data->memObjMap[mem];
    memcpy(&mem_info.allocInfo, pAllocateInfo, sizeof(VkMemoryAllocateInfo));
    // TODO:  Update for real hardware, actually process allocation info structures
    mem_info.allocInfo.pNext = NULL;
    mem_info.object = object;
    mem_info.mem = mem;
    mem_info.image = VK_NULL_HANDLE;
    mem_info.memRange.offset = 0;
    mem_info.memRange.size = 0;
    mem_info.pData = 0;
    mem_info.pDriverData = 0;
    mem_info.valid = false;
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end() && !image_node->second.valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image %" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory %" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = dev_data->imageMap.find(image);
        if (image_node != dev_data->imageMap.end()) {
            image_node->second.valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when the GLOBAL_CB_NODE has not already been looked up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if (cmdBufRefCount > 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object %#" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer %p still has a reference to mem obj %#" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object %#" PRIxLEAST64 " still has a reference to mem obj %#" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object %#" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, %#" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now; how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = get_mem_obj_info(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj %#" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj %#" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output an error
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately: binding an object to NULL memory is an error
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(%#" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS,
                        "MEM", "In %s, attempting to update Binding of %s Obj(%#" PRIxLEAST64 ") that's not in global list",
                        apiName, object_type_to_string(type), handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pMemInfo = get_mem_obj_info(dev_data, mem);
            if (pMemInfo) {
                DEVICE_MEM_INFO *pPrevBinding = get_mem_obj_info(dev_data, *pMemBinding);
                if (pPrevBinding != NULL) {
                    skipCall |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                                "In %s, attempting to bind memory (%#" PRIxLEAST64 ") to object (%#" PRIxLEAST64
                                ") which has already been bound to mem object %#" PRIxLEAST64,
                                apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
                } else {
                    pMemInfo->objBindings.insert({handle, type});
                    // For image objects, make sure default memory state is correctly set
                    // TODO : What's the best/correct way to handle this?
                    if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                        auto const image_node = dev_data->imageMap.find(VkImage(handle));
                        if (image_node != dev_data->imageMap.end()) {
                            VkImageCreateInfo ici = image_node->second.createInfo;
                            if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                                // TODO::  More memory state transition stuff.
                            }
                        }
                    }
                    *pMemBinding = mem;
                }
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding. Otherwise:
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if an error was logged (and the call should be skipped), false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                       VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        if (!pMemBinding) {
            skipCall |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_MISSING_MEM_BINDINGS, "MEM",
                "In %s, attempting to update Binding of Obj(%#" PRIxLEAST64 ") that's not in global list", apiName, handle);
        } else {
            // non-null case so should have real mem obj
            DEVICE_MEM_INFO *pInfo = get_mem_obj_info(dev_data, mem);
            if (pInfo) {
                pInfo->objBindings.insert({handle, type});
                // Need to set mem binding for this object
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For given Object, get 'mem' obj that it's bound to or NULL if no binding
static bool get_mem_binding_from_object(layer_data *dev_data, const uint64_t handle,
                                            const VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skipCall = false;
    *mem = VK_NULL_HANDLE;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        *mem = *pMemBinding;
    } else {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                           "MEM", "Trying to get mem binding for object %#" PRIxLEAST64 " but no such object in %s list", handle,
                           object_type_to_string(type));
    }
    return skipCall;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    DEVICE_MEM_INFO *pInfo = NULL;

    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        pInfo = &(*ii).second;

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at %p===", (void *)pInfo);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: %#" PRIxLEAST64, (uint64_t)(pInfo->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                pInfo->commandBufferBindings.size() + pInfo->objBindings.size());
        if (0 != pInfo->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&pInfo->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                pInfo->objBindings.size());
        if (pInfo->objBindings.size() > 0) {
            for (auto obj : pInfo->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT %" PRIu64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                pInfo->commandBufferBindings.size());
        if (pInfo->commandBufferBindings.size() > 0) {
            for (auto cb : pInfo->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB %p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (%p) has CB %p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj %" PRIu64, (uint64_t)obj);
        }
    }
}

#endif

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
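            /* SPIR-V ExecutionModel numbering (Vertex=0 ... GLCompute=5) lines up
             * with the bit positions of VkShaderStageFlagBits, so the shift below
             * yields the stage bit for this entrypoint's execution model. */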
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}

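/* Render a human-readable description of a type; e.g. a pointer to a uniform
 * vec4 of 32-bit floats comes out as "ptr to uniform vec4 of float32". */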
static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}

static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}

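/* Walk two type trees in parallel and decide whether they are compatible.
 * In relaxed mode, a vector in 'a' may carry more components than 'b' declares
 * (or 'b' may be a narrow numeric scalar), provided the element types match. */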
static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    /* walk two type trees together and decide whether they match */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
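        /* e.g. a 3-component vector of 64-bit floats: (64 * 3 + 127) / 128 = 2. */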
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    /* TODO: collect the name, too? Isn't required to be present. */
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

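/* Peel any pointer (and, for arrayed per-vertex interfaces, one array level)
 * off the given type until a struct is reached; returns src->end() if the
 * type never resolves to a struct. */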
1209static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1210    while (true) {
1211
1212        if (def.opcode() == spv::OpTypePointer) {
1213            def = src->get_def(def.word(3));
1214        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1215            def = src->get_def(def.word(2));
1216            is_array_of_verts = false;
1217        } else if (def.opcode() == spv::OpTypeStruct) {
1218            return def;
1219        } else {
1220            return src->end();
1221        }
1222    }
1223}
1224
1225static void collect_interface_block_members(layer_data *my_data, shader_module const *src,
1226                                            std::map<location_t, interface_var> &out,
1227                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1228                                            uint32_t id, uint32_t type_id, bool is_patch) {
1229    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1230    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1231    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1232        /* this isn't an interface block. */
1233        return;
1234    }
1235
1236    std::unordered_map<unsigned, unsigned> member_components;
1237
1238    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1239    for (auto insn : *src) {
1240        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1241            unsigned member_index = insn.word(2);
1242
1243            if (insn.word(3) == spv::DecorationComponent) {
1244                unsigned component = insn.word(4);
1245                member_components[member_index] = component;
1246            }
1247        }
1248    }
1249
1250    /* Second pass -- produce the output, from Location decorations */
1251    for (auto insn : *src) {
1252        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1253            unsigned member_index = insn.word(2);
1254            unsigned member_type_id = type.word(2 + member_index);
1255
1256            if (insn.word(3) == spv::DecorationLocation) {
1257                unsigned location = insn.word(4);
1258                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1259                auto component_it = member_components.find(member_index);
1260                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1261
1262                for (unsigned int offset = 0; offset < num_locations; offset++) {
1263                    interface_var v;
1264                    v.id = id;
1265                    /* TODO: member index in interface_var too? */
1266                    v.type_id = member_type_id;
1267                    v.offset = offset;
1268                    v.is_patch = is_patch;
1269                    v.is_block_member = true;
1270                    out[std::make_pair(location + offset, component)] = v;
1271                }
1272            }
1273        }
1274    }
1275}
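/* Worked example (illustrative): for `out Block { layout(location = 4) vec4 a;
   layout(location = 5, component = 1) float b; } blk;`, the first pass records
   component 1 for member 1, and the second pass emits interface_vars keyed
   (4,0) and (5,1), both with is_block_member = true. */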
1276
1277static void collect_interface_by_location(layer_data *my_data, shader_module const *src, spirv_inst_iter entrypoint,
1278                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1279                                          bool is_array_of_verts) {
1280    std::unordered_map<unsigned, unsigned> var_locations;
1281    std::unordered_map<unsigned, unsigned> var_builtins;
1282    std::unordered_map<unsigned, unsigned> var_components;
1283    std::unordered_map<unsigned, unsigned> blocks;
1284    std::unordered_map<unsigned, unsigned> var_patch;
1285
1286    for (auto insn : *src) {
1287
1288        /* We consider two interface models: SSO (separate shader object)
1289         * rendezvous-by-location, and builtins. Complain about anything that fits neither model.
1290         */
1291        if (insn.opcode() == spv::OpDecorate) {
1292            if (insn.word(2) == spv::DecorationLocation) {
1293                var_locations[insn.word(1)] = insn.word(3);
1294            }
1295
1296            if (insn.word(2) == spv::DecorationBuiltIn) {
1297                var_builtins[insn.word(1)] = insn.word(3);
1298            }
1299
1300            if (insn.word(2) == spv::DecorationComponent) {
1301                var_components[insn.word(1)] = insn.word(3);
1302            }
1303
1304            if (insn.word(2) == spv::DecorationBlock) {
1305                blocks[insn.word(1)] = 1;
1306            }
1307
1308            if (insn.word(2) == spv::DecorationPatch) {
1309                var_patch[insn.word(1)] = 1;
1310            }
1311        }
1312    }
1313
1314    /* TODO: handle grouped decorations */
1315    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1316     * have the same location, and we DON'T want to clobber. */
1317
1318    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1319       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1320       the word to determine which word contains the terminator. */
1321    uint32_t word = 3;
1322    while (entrypoint.word(word) & 0xff000000u) {
1323        ++word;
1324    }
1325    ++word;
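    /* Example: a name of "main" packs 'm','a','i','n' into word 3 (LSB-first per
       SPIR-V string packing), so the terminator falls in word 4 and the loop above
       leaves `word` at 5, where the interface ids walked below begin. */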
1326
1327    for (; word < entrypoint.len(); word++) {
1328        auto insn = src->get_def(entrypoint.word(word));
1329        assert(insn != src->end());
1330        assert(insn.opcode() == spv::OpVariable);
1331
1332        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1333            unsigned id = insn.word(2);
1334            unsigned type = insn.word(1);
1335
1336            int location = value_or_default(var_locations, id, -1);
1337            int builtin = value_or_default(var_builtins, id, -1);
1338            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1339            bool is_patch = var_patch.find(id) != var_patch.end();
1340
1341            /* All variables and interface block members in the Input or Output storage classes
1342             * must be decorated with either a builtin or an explicit location.
1343             *
1344             * TODO: integrate the interface block support here. For now, don't complain --
1345             * a valid SPIRV module will only hit this path for the interface block case, as the
1346             * individual members of the type are decorated, rather than variable declarations.
1347             */
1348
1349            if (location != -1) {
1350                /* A user-defined interface variable, with a location. Where a variable
1351                 * occupies multiple locations, emit one result for each. */
1352                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1353                for (unsigned int offset = 0; offset < num_locations; offset++) {
1354                    interface_var v;
1355                    v.id = id;
1356                    v.type_id = type;
1357                    v.offset = offset;
1358                    v.is_patch = is_patch;
1359                    v.is_block_member = false;
1360                    out[std::make_pair(location + offset, component)] = v;
1361                }
1362            } else if (builtin == -1) {
1363                /* An interface block instance */
1364                collect_interface_block_members(my_data, src, out, blocks, is_array_of_verts, id, type, is_patch);
1365            }
1366        }
1367    }
1368}
1369
1370static void collect_interface_by_descriptor_slot(layer_data *my_data, shader_module const *src,
1371                                                 std::unordered_set<uint32_t> const &accessible_ids,
1372                                                 std::map<descriptor_slot_t, interface_var> &out) {
1373
1374    std::unordered_map<unsigned, unsigned> var_sets;
1375    std::unordered_map<unsigned, unsigned> var_bindings;
1376
1377    for (auto insn : *src) {
1378        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1379         * DecorationDescriptorSet and DecorationBinding.
1380         */
1381        if (insn.opcode() == spv::OpDecorate) {
1382            if (insn.word(2) == spv::DecorationDescriptorSet) {
1383                var_sets[insn.word(1)] = insn.word(3);
1384            }
1385
1386            if (insn.word(2) == spv::DecorationBinding) {
1387                var_bindings[insn.word(1)] = insn.word(3);
1388            }
1389        }
1390    }
1391
1392    for (auto id : accessible_ids) {
1393        auto insn = src->get_def(id);
1394        assert(insn != src->end());
1395
1396        if (insn.opcode() == spv::OpVariable &&
1397            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1398            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1399            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1400
1401            auto existing_it = out.find(std::make_pair(set, binding));
1402            if (existing_it != out.end()) {
1403                /* conflict within the same SPIR-V module */
1404                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1405                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1406                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1407                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1408                        existing_it->first.second);
1409            }
1410
1411            interface_var v;
1412            v.id = insn.word(2);
1413            v.type_id = insn.word(1);
1414            v.offset = 0;
1415            v.is_patch = false;
1416            v.is_block_member = false;
1417            out[std::make_pair(set, binding)] = v;
1418        }
1419    }
1420}
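/* Example: a GLSL `layout(set = 1, binding = 2) uniform sampler2D s;` carries
   DescriptorSet and Binding decorations placing it in slot (1,2) above; variables
   lacking either decoration default to set 0 / binding 0. */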
1421
1422static bool validate_interface_between_stages(layer_data *my_data, shader_module const *producer,
1423                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1424                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1425                                              shader_stage_attributes const *consumer_stage) {
1426    std::map<location_t, interface_var> outputs;
1427    std::map<location_t, interface_var> inputs;
1428
1429    bool pass = true;
1430
1431    collect_interface_by_location(my_data, producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1432    collect_interface_by_location(my_data, consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1433
1434    auto a_it = outputs.begin();
1435    auto b_it = inputs.begin();
1436
1437    /* maps sorted by key (location); walk them together to find mismatches */
1438    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
1439        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1440        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1441        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1442        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1443
1444        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1445            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1446                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1447                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1448                        a_first.second, consumer_stage->name)) {
1449                pass = false;
1450            }
1451            a_it++;
1452        } else if (a_at_end || a_first > b_first) {
1453            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1454                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1455                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1456                        producer_stage->name)) {
1457                pass = false;
1458            }
1459            b_it++;
1460        } else {
1461            // subtleties of arrayed interfaces:
1462            // - if is_patch, then the member is not arrayed, even though the interface may be.
1463            // - if is_block_member, then the extra array level of an arrayed interface is not
1464            //   expressed in the member type -- it's expressed in the block type.
1465            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1466                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1467                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1468                             true)) {
1469                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1470                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1471                            a_first.first, a_first.second,
1472                            describe_type(producer, a_it->second.type_id).c_str(),
1473                            describe_type(consumer, b_it->second.type_id).c_str())) {
1474                    pass = false;
1475                }
1476            }
1477            if (a_it->second.is_patch != b_it->second.is_patch) {
1478                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1479                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1480                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1481                            "per-%s in %s stage", a_first.first, a_first.second,
1482                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1483                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1484                    pass = false;
1485                }
1486            }
1487            a_it++;
1488            b_it++;
1489        }
1490    }
1491
1492    return pass;
1493}
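/* Example: if the producer writes locations {0,1} and the consumer reads {1,2},
   the walk above warns that 0 is never consumed, errors that 2 is never written,
   and type-checks the shared location 1. */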
1494
1495enum FORMAT_TYPE {
1496    FORMAT_TYPE_UNDEFINED,
1497    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1498    FORMAT_TYPE_SINT,
1499    FORMAT_TYPE_UINT,
1500};
1501
1502static unsigned get_format_type(VkFormat fmt) {
1503    switch (fmt) {
1504    case VK_FORMAT_UNDEFINED:
1505        return FORMAT_TYPE_UNDEFINED;
1506    case VK_FORMAT_R8_SINT:
1507    case VK_FORMAT_R8G8_SINT:
1508    case VK_FORMAT_R8G8B8_SINT:
1509    case VK_FORMAT_R8G8B8A8_SINT:
1510    case VK_FORMAT_R16_SINT:
1511    case VK_FORMAT_R16G16_SINT:
1512    case VK_FORMAT_R16G16B16_SINT:
1513    case VK_FORMAT_R16G16B16A16_SINT:
1514    case VK_FORMAT_R32_SINT:
1515    case VK_FORMAT_R32G32_SINT:
1516    case VK_FORMAT_R32G32B32_SINT:
1517    case VK_FORMAT_R32G32B32A32_SINT:
1518    case VK_FORMAT_R64_SINT:
1519    case VK_FORMAT_R64G64_SINT:
1520    case VK_FORMAT_R64G64B64_SINT:
1521    case VK_FORMAT_R64G64B64A64_SINT:
1522    case VK_FORMAT_B8G8R8_SINT:
1523    case VK_FORMAT_B8G8R8A8_SINT:
1524    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1525    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1526    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1527        return FORMAT_TYPE_SINT;
1528    case VK_FORMAT_R8_UINT:
1529    case VK_FORMAT_R8G8_UINT:
1530    case VK_FORMAT_R8G8B8_UINT:
1531    case VK_FORMAT_R8G8B8A8_UINT:
1532    case VK_FORMAT_R16_UINT:
1533    case VK_FORMAT_R16G16_UINT:
1534    case VK_FORMAT_R16G16B16_UINT:
1535    case VK_FORMAT_R16G16B16A16_UINT:
1536    case VK_FORMAT_R32_UINT:
1537    case VK_FORMAT_R32G32_UINT:
1538    case VK_FORMAT_R32G32B32_UINT:
1539    case VK_FORMAT_R32G32B32A32_UINT:
1540    case VK_FORMAT_R64_UINT:
1541    case VK_FORMAT_R64G64_UINT:
1542    case VK_FORMAT_R64G64B64_UINT:
1543    case VK_FORMAT_R64G64B64A64_UINT:
1544    case VK_FORMAT_B8G8R8_UINT:
1545    case VK_FORMAT_B8G8R8A8_UINT:
1546    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1547    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1548    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1549        return FORMAT_TYPE_UINT;
1550    default:
1551        return FORMAT_TYPE_FLOAT;
1552    }
1553}
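/* e.g. VK_FORMAT_R8G8B8A8_UNORM is not listed above, so it falls through to
   FORMAT_TYPE_FLOAT -- matching shader-side float vectors such as `vec4`. */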
1554
1555/* characterizes a SPIR-V type appearing in an interface to a fixed-function (FF) stage,
1556 * for comparison to a VkFormat's characterization above. */
1557static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1558    auto insn = src->get_def(type);
1559    assert(insn != src->end());
1560
1561    switch (insn.opcode()) {
1562    case spv::OpTypeInt:
1563        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1564    case spv::OpTypeFloat:
1565        return FORMAT_TYPE_FLOAT;
1566    case spv::OpTypeVector:
1567        return get_fundamental_type(src, insn.word(2));
1568    case spv::OpTypeMatrix:
1569        return get_fundamental_type(src, insn.word(2));
1570    case spv::OpTypeArray:
1571        return get_fundamental_type(src, insn.word(2));
1572    case spv::OpTypePointer:
1573        return get_fundamental_type(src, insn.word(3));
1574    default:
1575        return FORMAT_TYPE_UNDEFINED;
1576    }
1577}
1578
1579static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1580    uint32_t bit_pos = u_ffs(stage);
1581    return bit_pos - 1;
1582}
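/* e.g. VK_SHADER_STAGE_FRAGMENT_BIT (0x10): u_ffs returns 5, yielding stage id 4. */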
1583
1584static bool validate_vi_consistency(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1585    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1586     * each binding should be specified only once.
1587     */
1588    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1589    bool pass = true;
1590
1591    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1592        auto desc = &vi->pVertexBindingDescriptions[i];
1593        auto &binding = bindings[desc->binding];
1594        if (binding) {
1595            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1596                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1597                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1598                pass = false;
1599            }
1600        } else {
1601            binding = desc;
1602        }
1603    }
1604
1605    return pass;
1606}
1607
1608static bool validate_vi_against_vs_inputs(layer_data *my_data, VkPipelineVertexInputStateCreateInfo const *vi,
1609                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1610    std::map<location_t, interface_var> inputs;
1611    bool pass = true;
1612
1613    collect_interface_by_location(my_data, vs, entrypoint, spv::StorageClassInput, inputs, false);
1614
1615    /* Build index by location */
1616    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1617    if (vi) {
1618        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1619            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1620            for (auto j = 0u; j < num_locations; j++) {
1621                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1622            }
1623        }
1624    }
1625
1626    auto it_a = attribs.begin();
1627    auto it_b = inputs.begin();
1628
1629    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1630        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1631        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1632        auto a_first = a_at_end ? 0 : it_a->first;
1633        auto b_first = b_at_end ? 0 : it_b->first.first;
1634        if (!a_at_end && (b_at_end || a_first < b_first)) {
1635            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1636                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1637                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1638                pass = false;
1639            }
1640            it_a++;
1641        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1642            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1643                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1644                        b_first)) {
1645                pass = false;
1646            }
1647            it_b++;
1648        } else {
1649            unsigned attrib_type = get_format_type(it_a->second->format);
1650            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1651
1652            /* type checking */
1653            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1654                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1655                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1656                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1657                            string_VkFormat(it_a->second->format), a_first,
1658                            describe_type(vs, it_b->second.type_id).c_str())) {
1659                    pass = false;
1660                }
1661            }
1662
1663            /* OK! */
1664            it_a++;
1665            it_b++;
1666        }
1667    }
1668
1669    return pass;
1670}
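/* Example: an attribute of VK_FORMAT_R32G32B32A32_SFLOAT at location 0 satisfies
   a VS input `layout(location = 0) in vec4 pos;`; the same attribute as
   R32G32B32A32_SINT at that location triggers the type-mismatch error above. */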
1671
1672static bool validate_fs_outputs_against_render_pass(layer_data *my_data, shader_module const *fs,
1673                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1674    std::map<location_t, interface_var> outputs;
1675    std::map<uint32_t, VkFormat> color_attachments;
1676    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1677        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1678            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1679        }
1680    }
1681
1682    bool pass = true;
1683
1684    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1685
1686    collect_interface_by_location(my_data, fs, entrypoint, spv::StorageClassOutput, outputs, false);
1687
1688    auto it_a = outputs.begin();
1689    auto it_b = color_attachments.begin();
1690
1691    /* Walk attachment list and outputs together */
1692
1693    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1694        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1695        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1696
1697        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1698            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1699                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1700                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1701                pass = false;
1702            }
1703            it_a++;
1704        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1705            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1706                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1707                pass = false;
1708            }
1709            it_b++;
1710        } else {
1711            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1712            unsigned att_type = get_format_type(it_b->second);
1713
1714            /* type checking */
1715            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1716                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1717                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1718                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1719                            string_VkFormat(it_b->second),
1720                            describe_type(fs, it_a->second.type_id).c_str())) {
1721                    pass = false;
1722                }
1723            }
1724
1725            /* OK! */
1726            it_a++;
1727            it_b++;
1728        }
1729    }
1730
1731    return pass;
1732}
1733
1734/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1735 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1736 * for example.
1737 * Note: we only explore parts of the image which might actually contain ids we care about for the above analyses.
1738 *  - NOT the shader input/output interfaces.
1739 *
1740 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1741 * converting parts of this to be generated from the machine-readable spec instead.
1742 */
1743static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1744    std::unordered_set<uint32_t> worklist;
1745    worklist.insert(entrypoint.word(2));
1746
1747    while (!worklist.empty()) {
1748        auto id_iter = worklist.begin();
1749        auto id = *id_iter;
1750        worklist.erase(id_iter);
1751
1752        auto insn = src->get_def(id);
1753        if (insn == src->end()) {
1754            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1755             * across all kinds of things here that we may not care about. */
1756            continue;
1757        }
1758
1759        /* try to add to the output set */
1760        if (!ids.insert(id).second) {
1761            continue; /* if we already saw this id, we don't want to walk it again. */
1762        }
1763
1764        switch (insn.opcode()) {
1765        case spv::OpFunction:
1766            /* scan whole body of the function, enlisting anything interesting */
1767            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1768                switch (insn.opcode()) {
1769                case spv::OpLoad:
1770                case spv::OpAtomicLoad:
1771                case spv::OpAtomicExchange:
1772                case spv::OpAtomicCompareExchange:
1773                case spv::OpAtomicCompareExchangeWeak:
1774                case spv::OpAtomicIIncrement:
1775                case spv::OpAtomicIDecrement:
1776                case spv::OpAtomicIAdd:
1777                case spv::OpAtomicISub:
1778                case spv::OpAtomicSMin:
1779                case spv::OpAtomicUMin:
1780                case spv::OpAtomicSMax:
1781                case spv::OpAtomicUMax:
1782                case spv::OpAtomicAnd:
1783                case spv::OpAtomicOr:
1784                case spv::OpAtomicXor:
1785                    worklist.insert(insn.word(3)); /* ptr */
1786                    break;
1787                case spv::OpStore:
1788                case spv::OpAtomicStore:
1789                    worklist.insert(insn.word(1)); /* ptr */
1790                    break;
1791                case spv::OpAccessChain:
1792                case spv::OpInBoundsAccessChain:
1793                    worklist.insert(insn.word(3)); /* base ptr */
1794                    break;
1795                case spv::OpSampledImage:
1796                case spv::OpImageSampleImplicitLod:
1797                case spv::OpImageSampleExplicitLod:
1798                case spv::OpImageSampleDrefImplicitLod:
1799                case spv::OpImageSampleDrefExplicitLod:
1800                case spv::OpImageSampleProjImplicitLod:
1801                case spv::OpImageSampleProjExplicitLod:
1802                case spv::OpImageSampleProjDrefImplicitLod:
1803                case spv::OpImageSampleProjDrefExplicitLod:
1804                case spv::OpImageFetch:
1805                case spv::OpImageGather:
1806                case spv::OpImageDrefGather:
1807                case spv::OpImageRead:
1808                case spv::OpImage:
1809                case spv::OpImageQueryFormat:
1810                case spv::OpImageQueryOrder:
1811                case spv::OpImageQuerySizeLod:
1812                case spv::OpImageQuerySize:
1813                case spv::OpImageQueryLod:
1814                case spv::OpImageQueryLevels:
1815                case spv::OpImageQuerySamples:
1816                case spv::OpImageSparseSampleImplicitLod:
1817                case spv::OpImageSparseSampleExplicitLod:
1818                case spv::OpImageSparseSampleDrefImplicitLod:
1819                case spv::OpImageSparseSampleDrefExplicitLod:
1820                case spv::OpImageSparseSampleProjImplicitLod:
1821                case spv::OpImageSparseSampleProjExplicitLod:
1822                case spv::OpImageSparseSampleProjDrefImplicitLod:
1823                case spv::OpImageSparseSampleProjDrefExplicitLod:
1824                case spv::OpImageSparseFetch:
1825                case spv::OpImageSparseGather:
1826                case spv::OpImageSparseDrefGather:
1827                case spv::OpImageTexelPointer:
1828                    worklist.insert(insn.word(3)); /* image or sampled image */
1829                    break;
1830                case spv::OpImageWrite:
1831                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1832                    break;
1833                case spv::OpFunctionCall:
1834                    for (uint32_t i = 3; i < insn.len(); i++) {
1835                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1836                    }
1837                    break;
1838
1839                case spv::OpExtInst:
1840                    for (uint32_t i = 5; i < insn.len(); i++) {
1841                        worklist.insert(insn.word(i)); /* operands to ext inst */
1842                    }
1843                    break;
1844                }
1845            }
1846            break;
1847        }
1848    }
1849}
1850
1851static bool validate_push_constant_block_against_pipeline(layer_data *my_data,
1852                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1853                                                          shader_module const *src, spirv_inst_iter type,
1854                                                          VkShaderStageFlagBits stage) {
1855    bool pass = true;
1856
1857    /* strip off ptrs etc */
1858    type = get_struct_type(src, type, false);
1859    assert(type != src->end());
1860
1861    /* validate directly off the offsets. this isn't quite correct for arrays
1862     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1863     * sizes */
1864    for (auto insn : *src) {
1865        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1866
1867            if (insn.word(3) == spv::DecorationOffset) {
1868                unsigned offset = insn.word(4);
1869                auto size = 4; /* bytes; TODO: calculate this based on the type */
1870
1871                bool found_range = false;
1872                for (auto const &range : *pushConstantRanges) {
1873                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1874                        found_range = true;
1875
1876                        if ((range.stageFlags & stage) == 0) {
1877                            if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1878                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1879                                        "Push constant range covering variable starting at "
1880                                        "offset %u not accessible from stage %s",
1881                                        offset, string_VkShaderStageFlagBits(stage))) {
1882                                pass = false;
1883                            }
1884                        }
1885
1886                        break;
1887                    }
1888                }
1889
1890                if (!found_range) {
1891                    if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1892                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1893                                "Push constant range covering variable starting at "
1894                                "offset %u not declared in layout",
1895                                offset)) {
1896                        pass = false;
1897                    }
1898                }
1899            }
1900        }
1901    }
1902
1903    return pass;
1904}
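/* Worked example (illustrative): a member decorated Offset 16 is covered by a
   range { stageFlags = VK_SHADER_STAGE_VERTEX_BIT, offset = 0, size = 32 } when
   validating the vertex stage (0 <= 16 and 16 + 4 <= 32); shrink the range to
   size = 16 and the member falls outside every declared range. */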
1905
1906static bool validate_push_constant_usage(layer_data *my_data,
1907                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
1908                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
1909    bool pass = true;
1910
1911    for (auto id : accessible_ids) {
1912        auto def_insn = src->get_def(id);
1913        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
1914            pass &= validate_push_constant_block_against_pipeline(my_data, pushConstantRanges, src,
1915                                                                 src->get_def(def_insn.word(1)), stage);
1916        }
1917    }
1918
1919    return pass;
1920}
1921
1922// For a given pipelineLayout, verify that the set_layout_node at slot.first
1923//  has the requested binding at slot.second and return ptr to that binding
1924static VkDescriptorSetLayoutBinding const * get_descriptor_binding(layer_data *my_data, PIPELINE_LAYOUT_NODE *pipelineLayout, descriptor_slot_t slot) {
1925
1926    if (!pipelineLayout)
1927        return nullptr;
1928
1929    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
1930        return nullptr;
1931
1932    const auto & layout_node = my_data->descriptorSetLayoutMap[pipelineLayout->descriptorSetLayouts[slot.first]];
1933    return layout_node->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
1934}
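/* Note: the operator[] lookup above default-constructs a null mapped value for an
   unknown setLayout handle; callers rely on the handle having been recorded at
   vkCreateDescriptorSetLayout time. */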
1935
1936// Block of code at start here for managing/tracking Pipeline state that this layer cares about
1937
1938static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
1939
1940// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
1941//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
1942//   to that same cmd buffer by separate thread are not changing state from underneath us
1943// Track the last cmd buffer touched by this thread
1944
1945static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
1946    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
1947        if (pCB->drawCount[i])
1948            return true;
1949    }
1950    return false;
1951}
1952
1953// Check object status for selected flag state
1954static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
1955                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
1956    if (!(pNode->status & status_mask)) {
1957        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
1958                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
1959                       "CB object %#" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
1960    }
1961    return false;
1962}
1963
1964// Retrieve pipeline node ptr for given pipeline object
1965static PIPELINE_NODE *getPipeline(layer_data *my_data, const VkPipeline pipeline) {
1966    if (my_data->pipelineMap.find(pipeline) == my_data->pipelineMap.end()) {
1967        return NULL;
1968    }
1969    return my_data->pipelineMap[pipeline];
1970}
1971
1972// Return true if for a given PSO, the given state enum is dynamic, else return false
1973static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
1974    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
1975        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
1976            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
1977                return true;
1978        }
1979    }
1980    return false;
1981}
1982
1983// Validate state stored as flags at time of draw call
1984static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
1985    bool result;
1986    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
1987                             "Dynamic viewport state not set for this command buffer");
1988    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
1989                              "Dynamic scissor state not set for this command buffer");
1990    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
1991        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
1992         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
1993        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1994                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
1995    }
1996    if (pPipe->graphicsPipelineCI.pRasterizationState &&
1997        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
1998        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
1999                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2000    }
2001    if (pPipe->blendConstantsEnabled) {
2002        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2003                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2004    }
2005    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2006        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2007        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2008                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2009    }
2010    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2011        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2012        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2013                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2014        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2015                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2016        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2017                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2018    }
2019    if (indexedDraw) {
2020        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2021                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2022                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2023    }
2024    return result;
2025}
2026
2027// Verify attachment reference compatibility according to spec
2028//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
2029//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2030//   to make sure that format and sample counts match.
2031//  If not, they are not compatible.
2032static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2033                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2034                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2035                                             const VkAttachmentDescription *pSecondaryAttachments) {
2036    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2037        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2038            return true;
2039    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2040        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2041            return true;
2042    } else { // format and sample count must match
2043        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2044             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2045            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2046             pSecondaryAttachments[pSecondary[index].attachment].samples))
2047            return true;
2048    }
2049    // Format and sample counts didn't match
2050    return false;
2051}
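/* Example: with 2 primary color refs and 1 secondary ref, index 1 takes the
   `index >= secondaryCount` branch above, so the passes are compatible only if
   the primary's second reference is itself VK_ATTACHMENT_UNUSED. */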
2052
2053// For given primary and secondary RenderPass objects, verify that they're compatible
2054static bool verify_renderpass_compatibility(layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2055                                            string &errorMsg) {
2056    if (my_data->renderPassMap.find(primaryRP) == my_data->renderPassMap.end()) {
2057        stringstream errorStr;
2058        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2059        errorMsg = errorStr.str();
2060        return false;
2061    } else if (my_data->renderPassMap.find(secondaryRP) == my_data->renderPassMap.end()) {
2062        stringstream errorStr;
2063        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2064        errorMsg = errorStr.str();
2065        return false;
2066    }
2067    // Trivial pass case is exact same RP
2068    if (primaryRP == secondaryRP) {
2069        return true;
2070    }
2071    const VkRenderPassCreateInfo *primaryRPCI = my_data->renderPassMap[primaryRP]->pCreateInfo;
2072    const VkRenderPassCreateInfo *secondaryRPCI = my_data->renderPassMap[secondaryRP]->pCreateInfo;
2073    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2074        stringstream errorStr;
2075        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2076                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2077        errorMsg = errorStr.str();
2078        return false;
2079    }
2080    uint32_t spIndex = 0;
2081    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2082        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2083        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2084        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2085        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2086        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2087            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2088                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2089                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2090                stringstream errorStr;
2091                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2092                errorMsg = errorStr.str();
2093                return false;
2094            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2095                                                         primaryColorCount, primaryRPCI->pAttachments,
2096                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2097                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2098                stringstream errorStr;
2099                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2100                errorMsg = errorStr.str();
2101                return false;
2102            }
2103        }
2104
2105        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2106                                              1, primaryRPCI->pAttachments,
2107                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2108                                              1, secondaryRPCI->pAttachments)) {
2109            stringstream errorStr;
2110            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2111            errorMsg = errorStr.str();
2112            return false;
2113        }
2114
2115        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2116        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2117        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2118        for (uint32_t i = 0; i < inputMax; ++i) {
2119            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2120                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2121                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2122                stringstream errorStr;
2123                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2124                errorMsg = errorStr.str();
2125                return false;
2126            }
2127        }
2128    }
2129    return true;
2130}
2131
2132// For a given SET_NODE, verify that its Set is compatible w/ the setLayout corresponding to pipelineLayout[layoutIndex]
2133static bool verify_set_layout_compatibility(layer_data *my_data, const SET_NODE *pSet, const VkPipelineLayout layout,
2134                                            const uint32_t layoutIndex, string &errorMsg) {
2135    auto pipeline_layout_it = my_data->pipelineLayoutMap.find(layout);
2136    if (pipeline_layout_it == my_data->pipelineLayoutMap.end()) {
2137        stringstream errorStr;
2138        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2139        errorMsg = errorStr.str();
2140        return false;
2141    }
2142    if (layoutIndex >= pipeline_layout_it->second.descriptorSetLayouts.size()) {
2143        stringstream errorStr;
2144        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout_it->second.descriptorSetLayouts.size()
2145                 << " setLayouts corresponding to sets 0-" << pipeline_layout_it->second.descriptorSetLayouts.size() - 1
2146                 << ", but you're attempting to bind set to index " << layoutIndex;
2147        errorMsg = errorStr.str();
2148        return false;
2149    }
2150    auto layout_node = my_data->descriptorSetLayoutMap[pipeline_layout_it->second.descriptorSetLayouts[layoutIndex]];
2151    return layout_node->IsCompatible(pSet->p_layout, &errorMsg);
2152}
2153
2154// Validate that data for each specialization entry is fully contained within the buffer.
2155static bool validate_specialization_offsets(layer_data *my_data, VkPipelineShaderStageCreateInfo const *info) {
2156    bool pass = true;
2157
2158    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2159
2160    if (spec) {
2161        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2162            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2163                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2164                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2165                            "Specialization entry %u (for constant id %u) references memory outside provided "
2166                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2167                            " bytes provided)",
2168                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2169                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2170
2171                    pass = false;
2172                }
2173            }
2174        }
2175    }
2176
2177    return pass;
2178}
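/* Example: with dataSize = 8, an entry { constantID = 7, offset = 4, size = 8 }
   is rejected above, since it references bytes 4..11 of an 8-byte blob. */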
2179
2180static bool descriptor_type_match(layer_data *my_data, shader_module const *module, uint32_t type_id,
2181                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2182    auto type = module->get_def(type_id);
2183
2184    descriptor_count = 1;
2185
2186    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2187     * descriptor count for each dimension. */
2188    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2189        if (type.opcode() == spv::OpTypeArray) {
2190            descriptor_count *= get_constant_value(module, type.word(3));
2191            type = module->get_def(type.word(2));
2192        }
2193        else {
2194            type = module->get_def(type.word(3));
2195        }
2196    }
2197
2198    switch (type.opcode()) {
2199    case spv::OpTypeStruct: {
2200        for (auto insn : *module) {
2201            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2202                if (insn.word(2) == spv::DecorationBlock) {
2203                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2204                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2205                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2206                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2207                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2208                }
2209            }
2210        }
2211
2212        /* Invalid */
2213        return false;
2214    }
2215
2216    case spv::OpTypeSampler:
2217        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2218
2219    case spv::OpTypeSampledImage:
2220        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2221            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2222             * doesn't really have a sampler, and a texel buffer descriptor
2223             * doesn't really provide one. Allow this slight mismatch.
2224             */
2225            auto image_type = module->get_def(type.word(2));
2226            auto dim = image_type.word(3);
2227            auto sampled = image_type.word(7);
2228            return dim == spv::DimBuffer && sampled == 1;
2229        }
2230        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2231
2232    case spv::OpTypeImage: {
2233        /* Many descriptor types can back image types -- which one depends on dimension
2234         * and whether the image will be used with a sampler. SPIR-V for
2235         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2236         * runtime is unacceptable.
2237         */
2238        auto dim = type.word(3);
2239        auto sampled = type.word(7);
2240
2241        if (dim == spv::DimSubpassData) {
2242            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2243        } else if (dim == spv::DimBuffer) {
2244            if (sampled == 1) {
2245                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2246            } else {
2247                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2248            }
2249        } else if (sampled == 1) {
2250            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2251        } else {
2252            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2253        }
2254    }
2255
2256    /* We shouldn't really see any other junk types -- but if we do, they're
2257     * a mismatch.
2258     */
2259    default:
2260        return false; /* Mismatch */
2261    }
2262}
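/* Example: a GLSL `sampler2D` compiles to OpTypeSampledImage of a Dim2D image,
   matching VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; declared as `sampler2D s[4]`,
   the array level is stripped above and descriptor_count becomes 4. */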
2263
2264static bool require_feature(layer_data *my_data, VkBool32 feature, char const *feature_name) {
2265    if (!feature) {
2266        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2267                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2268                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2269                    "enabled on the device",
2270                    feature_name)) {
2271            return false;
2272        }
2273    }
2274
2275    return true;
2276}
2277
2278static bool validate_shader_capabilities(layer_data *my_data, shader_module const *src) {
2279    bool pass = true;
2280
2281    auto enabledFeatures = &my_data->phys_dev_properties.features;
2282
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(my_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(my_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(my_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(my_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(my_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(my_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(my_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(my_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(my_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(my_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(my_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(my_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, not supported in Vulkan.",
                            insn.word(1)))
                    pass = false;
                break;
            }
        }
    }

    return pass;
}

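// Validate a single pipeline shader stage: resolve the shader module and entrypoint,
// check the module's declared capabilities against the enabled device features, and
// verify every descriptor reachable from the entrypoint against the pipeline layout.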
static bool validate_pipeline_shader_stage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage,
                                           PIPELINE_NODE *pipeline, PIPELINE_LAYOUT_NODE *pipelineLayout,
                                           shader_module **out_module, spirv_inst_iter *out_entrypoint) {
    bool pass = true;
    auto module = *out_module = dev_data->shaderModuleMap[pStage->module].get();
    pass &= validate_specialization_offsets(dev_data, pStage);

    /* find the entrypoint */
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                    "No entrypoint found named `%s` for stage %s", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage))) {
            pass = false;
        }
    }

    /* validate shader capabilities against enabled device features */
    pass &= validate_shader_capabilities(dev_data, module);

    /* mark accessible ids */
    std::unordered_set<uint32_t> accessible_ids;
    mark_accessible_ids(module, entrypoint, accessible_ids);
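    /* accessible_ids now holds every id reachable from this entrypoint, so
     * descriptors referenced only by other entrypoints are not flagged below */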

    /* validate descriptor set layout against what the entrypoint actually uses */
    std::map<descriptor_slot_t, interface_var> descriptor_uses;
    collect_interface_by_descriptor_slot(dev_data, module, accessible_ids, descriptor_uses);
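    /* each descriptor_slot_t key is a (set, binding) pair, hence the
     * use.first.first / use.first.second accesses in the loop below */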

    /* validate push constant usage */
    pass &= validate_push_constant_usage(dev_data, &pipelineLayout->pushConstantRanges,
                                        module, accessible_ids, pStage->stage);

    /* validate descriptor use */
    for (auto use : descriptor_uses) {
        // While validating shaders, capture which slots are used by the pipeline
        pipeline->active_slots[use.first.first].insert(use.first.second);

        /* verify given pipelineLayout has requested setLayout with requested binding */
        const auto & binding = get_descriptor_binding(dev_data, pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but descriptor not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(dev_data, module, use.second.type_id, binding->descriptorType,
                                          /*out*/ required_descriptor_count)) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
                                                                       "%u.%u (used as type `%s`) but "
                                                                       "descriptor of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    return pass;
}


// Validate the shaders used by the given pipeline and store the set/binding
//  slots actually used by those shaders into pPipeline->active_slots
static bool validate_and_capture_pipeline_shader_state(layer_data *my_data, PIPELINE_NODE *pPipeline) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
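    // One slot per graphics stage (vertex, tess control, tess eval, geometry,
    // fragment), indexed by get_shader_stage_id()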
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(my_data, pStage, pPipeline, pipelineLayout,
                                               &shaders[stage_id], &entrypoints[stage_id]);
    }

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(my_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(my_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }
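    // Walk the remaining stages in pipeline order, matching each active stage's
    // outputs against the inputs of the next active stage; slots with no shader
    // bound are skipped by advancing consumer while producer waits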

    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass &= validate_interface_between_stages(my_data,
                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    auto rp = pCreateInfo->renderPass != VK_NULL_HANDLE ? my_data->renderPassMap[pCreateInfo->renderPass] : nullptr;

    if (shaders[fragment_stage] && rp) {
        pass &= validate_fs_outputs_against_render_pass(my_data, shaders[fragment_stage], entrypoints[fragment_stage], rp,
                                                       pCreateInfo->subpass);
    }

    return pass;
}

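// A compute pipeline has exactly one shader stage, so validation reduces to
// checking that single stage against the pipeline layout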
static bool validate_compute_pipeline(layer_data *my_data, PIPELINE_NODE *pPipeline) {
    auto pCreateInfo = pPipeline->computePipelineCI.ptr();

    auto pipelineLayout = pCreateInfo->layout != VK_NULL_HANDLE ? &my_data->pipelineLayoutMap[pCreateInfo->layout] : nullptr;

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(my_data, &pCreateInfo->stage, pPipeline, pipelineLayout,
                                          &module, &entrypoint);
}

// Return Set node ptr for specified set or else NULL
static SET_NODE *getSetNode(layer_data *my_data, const VkDescriptorSet set) {
    auto it = my_data->setMap.find(set);
    if (it == my_data->setMap.end()) {
        return NULL;
    }
    return it->second;
}
// For the given command buffer, verify and update the state for activeSetBindingsPairs
//  This includes:
//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
//     To be valid, the dynamic offset combined with the offset and range from its
//     descriptor update must not overflow the size of its buffer being updated
//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
static bool validate_and_update_drawtime_descriptor_state(
    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
    const vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> &activeSetBindingsPairs) {
    bool result = false;

    VkWriteDescriptorSet *pWDS = NULL;
    uint32_t dynOffsetIndex = 0;
    VkDeviceSize bufferSize = 0;
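    // dynOffsetIndex walks the flattened array of dynamic offsets supplied at
    // vkCmdBindDescriptorSets() time, advancing once per dynamic descriptor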
    for (auto set_bindings_pair : activeSetBindingsPairs) {
        SET_NODE *set_node = set_bindings_pair.first;
        auto layout_node = set_node->p_layout;
        for (auto binding : set_bindings_pair.second) {
            if ((set_node->p_layout->GetTypeFromBinding(binding) == VK_DESCRIPTOR_TYPE_SAMPLER) &&
                (set_node->p_layout->GetDescriptorCountFromBinding(binding) != 0) &&
                (set_node->p_layout->GetImmutableSamplerPtrFromBinding(binding))) {
                // No work for immutable sampler binding
            } else {
                uint32_t startIdx = layout_node->GetGlobalStartIndexFromBinding(binding);
                uint32_t endIdx = layout_node->GetGlobalEndIndexFromBinding(binding);
                for (uint32_t i = startIdx; i <= endIdx; ++i) {
                    // We checked earlier that the set was updated, but now make sure the given slot was updated
                    // TODO : Would be better to store set# that set is bound to so we can report set.binding[index] not updated
                    // For immutable sampler w/o combined image, don't need to update
                    if (!set_node->pDescriptorUpdates[i]) {
                        result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                            DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                            "DS %#" PRIxLEAST64 " bound and active but it never had binding %u updated. It is now being used to draw so "
                                                                "this will result in undefined behavior.",
                                            reinterpret_cast<const uint64_t &>(set_node->set), binding);
                    } else {
                        switch (set_node->pDescriptorUpdates[i]->sType) {
                        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
                            pWDS = (VkWriteDescriptorSet *)set_node->pDescriptorUpdates[i];

                            // Verify uniform and storage buffers actually are bound to valid memory at draw time.
                            if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
                                (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                                (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
                                (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    auto buffer_node = dev_data->bufferMap.find(pWDS->pBufferInfo[j].buffer);
                                    if (buffer_node == dev_data->bufferMap.end()) {
                                        result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                          VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                          reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                          DRAWSTATE_INVALID_BUFFER, "DS",
                                                          "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index #%u"
                                                          " is not defined!  Has vkCreateBuffer been called?",
                                                          reinterpret_cast<const uint64_t &>(set_node->set),
                                                          string_VkDescriptorType(pWDS->descriptorType),
                                                          reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i);
                                    } else {
                                        auto mem_entry = dev_data->memObjMap.find(buffer_node->second.mem);
                                        if (mem_entry == dev_data->memObjMap.end()) {
                                            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                              VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                              reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                              DRAWSTATE_INVALID_BUFFER, "DS",
                                                              "VkDescriptorSet (%#" PRIxLEAST64 ") %s (%#" PRIxLEAST64 ") at index"
                                                              " #%u, has no memory bound to it!",
                                                              reinterpret_cast<const uint64_t &>(set_node->set),
                                                              string_VkDescriptorType(pWDS->descriptorType),
                                                              reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), i);
                                        }
                                    }
                                    // If it's a dynamic buffer, make sure the offsets are within the buffer.
                                    if ((pWDS->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
                                        (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
                                        bufferSize = dev_data->bufferMap[pWDS->pBufferInfo[j].buffer].createInfo.size;
                                        uint32_t dynOffset =
                                            pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].dynamicOffsets[dynOffsetIndex];
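                                        // With VK_WHOLE_SIZE the effective range runs from (update offset +
                                        // dynamic offset) to the end of the buffer, so only the start must be
                                        // in bounds; otherwise offset + dynamic offset + range must fit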
                                        if (pWDS->pBufferInfo[j].range == VK_WHOLE_SIZE) {
                                            if ((dynOffset + pWDS->pBufferInfo[j].offset) > bufferSize) {
                                                result |= log_msg(
                                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                    reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                    DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
                                                    "VkDescriptorSet (%#" PRIxLEAST64 ") bound as set #%u has range of "
                                                    "VK_WHOLE_SIZE, but its dynamic offset %#" PRIxLEAST32 " "
                                                    "combined with offset %#" PRIxLEAST64 " oversteps its buffer (%#" PRIxLEAST64
                                                    "), which has a size of %#" PRIxLEAST64 ".",
                                                    reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                                    pWDS->pBufferInfo[j].offset,
                                                    reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                                            }
                                        } else if ((dynOffset + pWDS->pBufferInfo[j].offset + pWDS->pBufferInfo[j].range) >
                                                   bufferSize) {
                                            result |=
                                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                                        reinterpret_cast<const uint64_t &>(set_node->set), __LINE__,
                                                        DRAWSTATE_DYNAMIC_OFFSET_OVERFLOW, "DS",
                                                        "VkDescriptorSet (%#" PRIxLEAST64
                                                        ") bound as set #%u has dynamic offset %#" PRIxLEAST32 ". "
                                                        "Combined with offset %#" PRIxLEAST64 " and range %#" PRIxLEAST64
                                                        " from its update, this oversteps its buffer "
                                                        "(%#" PRIxLEAST64 ") which has a size of %#" PRIxLEAST64 ".",
                                                        reinterpret_cast<const uint64_t &>(set_node->set), i, dynOffset,
                                                        pWDS->pBufferInfo[j].offset, pWDS->pBufferInfo[j].range,
                                                        reinterpret_cast<const uint64_t &>(pWDS->pBufferInfo[j].buffer), bufferSize);
                                        }
                                        dynOffsetIndex++;
                                    }
                                }
                            }
                            if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
                                }
                            } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    assert(dev_data->bufferViewMap.find(pWDS->pTexelBufferView[j]) != dev_data->bufferViewMap.end());
                                    pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
                                }
                            } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                                       pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                                    pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
                                }
                            }
                            i += pWDS->descriptorCount; // Advance i to end of this set of descriptors (++i at end of for loop will move 1
                                                        // index past last of these descriptors)
                            break;
                        default: // Currently only shadowing Write update nodes so shouldn't get here
                            assert(0);
                            continue;
                        }
                    }
                }
            }
        }
    }
    return result;
}
// TODO : This is a temp function that naively updates bound storage images and buffers based on which descriptor sets are bound.
//   When validate_and_update_draw_state() handles compute shaders so that active_slots is correct for compute pipelines, this
//   function can be removed and validate_and_update_draw_state() used instead
static void update_shader_storage_images_and_buffers(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    VkWriteDescriptorSet *pWDS = nullptr;
    SET_NODE *pSet = nullptr;
    // For the bound descriptor sets, pull off any storage images and buffers
    //  This may be more than are actually updated depending on which are active, but for now this is a stop-gap for compute
    //  pipelines
    for (auto set : pCB->lastBound[VK_PIPELINE_BIND_POINT_COMPUTE].uniqueBoundSets) {
        // Get the set node
        pSet = getSetNode(dev_data, set);
        // For each update in the set
        for (auto pUpdate : pSet->pDescriptorUpdates) {
            // If it's a write update to STORAGE type capture image/buffer being updated
            if (pUpdate && (pUpdate->sType == VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)) {
                pWDS = reinterpret_cast<VkWriteDescriptorSet *>(pUpdate);
                if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) {
                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                        pCB->updateImages.insert(pWDS->pImageInfo[j].imageView);
                    }
                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) {
                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                        pCB->updateBuffers.insert(dev_data->bufferViewMap[pWDS->pTexelBufferView[j]].buffer);
                    }
                } else if (pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           pWDS->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                    for (uint32_t j = 0; j < pWDS->descriptorCount; ++j) {
                        pCB->updateBuffers.insert(pWDS->pBufferInfo[j].buffer);
                    }
                }
            }
        }
    }
}

// Validate overall state at the time of a draw call
static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
                                           const VkPipelineBindPoint bindPoint) {
    bool result = false;
    auto const &state = pCB->lastBound[bindPoint];
    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return unconditionally: every check below dereferences the pipeline,
        // so continuing without one would crash even when the message is not flagged
        return result;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
    else {
        // First block of code below to validate active sets should eventually
        //  work for the compute case but currently doesn't so return early for now
        // TODO : When active sets in compute shaders are correctly parsed,
        //  stop returning early here and handle them in top block below
        return result;
    }

    // Now complete other state checks
    // TODO : When Compute shaders are properly parsed, fix this section to validate them as well
    if (state.pipelineLayout) {
        string errorString;
        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
        vector<std::pair<SET_NODE *, unordered_set<uint32_t>>> activeSetBindingsPairs;
        for (auto setBindingPair : pPipe->active_slots) {
            uint32_t setIndex = setBindingPair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                  "VkPipeline %#" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
                                  setIndex);
            } else if (!verify_set_layout_compatibility(my_data, my_data->setMap[state.boundDescriptorSets[setIndex]],
                                                        pPipe->graphicsPipelineCI.layout, setIndex, errorString)) {
                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
                VkDescriptorSet setHandle = my_data->setMap[state.boundDescriptorSets[setIndex]]->set;
                result |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (%#" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout %#" PRIxLEAST64 " due to: %s",
                            (uint64_t)setHandle, setIndex, (uint64_t)pPipe->graphicsPipelineCI.layout, errorString.c_str());
            } else { // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                SET_NODE *pSet = my_data->setMap[state.boundDescriptorSets[setIndex]];
                // Save vector of all active sets to verify dynamicOffsets below
                activeSetBindingsPairs.push_back(std::make_pair(pSet, setBindingPair.second));
                // Make sure set has been updated if it has no immutable samplers
                //  If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!pSet->pUpdateStructs) {
                    for (auto binding : setBindingPair.second) {
                        if (!pSet->p_layout->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "DS %#" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                "this will result in undefined behavior.",
                                (uint64_t)pSet->set);
                        }
                    }
                }
            }
        }
        // For given active slots, verify any dynamic descriptors and record updated images & buffers
        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
    }
    // TODO : If/when compute pipelines/shaders are handled above, the code below applies only to the gfx bind point
    //if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {
    // Verify Vtx binding
    if (pPipe->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipe->vertexBindingDescriptions.size(); i++) {
            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                  "The Pipeline State Object (%#" PRIxLEAST64
                                  ") expects this Command Buffer's vertex binding index " PRINTF_SIZE_T_SPECIFIER
                                  " to be set via vkCmdBindVertexBuffers.",
                                  (uint64_t)state.pipeline, i);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty()) {
            result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                              "Vertex buffers are bound to command buffer (%#" PRIxLEAST64
                              ") but no vertex buffers are attached to this Pipeline State Object (%#" PRIxLEAST64 ").",
                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipe->graphicsPipelineCI.pRasterizationState ||
         (pPipe->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipe->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipe, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipe, VK_DYNAMIC_STATE_SCISSOR);
        if (dynViewport) {
            if (pCB->viewports.size() != pPipe->graphicsPipelineCI.pViewportState->viewportCount) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
                                  ", but PSO viewportCount is %u. These counts must match.",
                                  pCB->viewports.size(), pPipe->graphicsPipelineCI.pViewportState->viewportCount);
            }
        }
        if (dynScissor) {
            if (pCB->scissors.size() != pPipe->graphicsPipelineCI.pViewportState->scissorCount) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
                                  ", but PSO scissorCount is %u. These counts must match.",
                                  pCB->scissors.size(), pPipe->graphicsPipelineCI.pViewportState->scissorCount);
            }
        }
    }
    //} // end of "if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint) {" block
    return result;
}

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                                            "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                                          "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}

// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
                                      int pipelineIndex) {
    bool skipCall = false;

    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_NODE *pBasePipeline = nullptr;
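        // Exactly one of basePipelineHandle / basePipelineIndex may be given;
        // the negated XOR below fires when both or neither are specified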
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skipCall |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->phys_dev_properties.features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
                        skipCall |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
                            "enabled, all elements of pAttachments must be identical");
                    }
                }
            }
        }
        if (!my_data->phys_dev_properties.features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
        }
        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
            skipCall |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
        }
    }

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto rp_data = my_data->renderPassMap.find(pPipeline->graphicsPipelineCI.renderPass);
    if (rp_data != my_data->renderPassMap.end() &&
        pPipeline->graphicsPipelineCI.subpass >= rp_data->second->pCreateInfo->subpassCount) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                                                                           "is out of range for this renderpass (0..%u)",
                            pPipeline->graphicsPipelineCI.subpass, rp_data->second->pCreateInfo->subpassCount - 1);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data, pPipeline)) {
        skipCall = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skipCall |=
            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
    }
    // Either both or neither TC/TE shaders should be defined
    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
    }
    // Compute shaders should be specified independent of Gfx shaders
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
        (pPipeline->active_shaders &
         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                                                                           "topology for tessellation pipelines");
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology is only valid for tessellation pipelines");
        }
        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                "Invalid Pipeline CreateInfo State: "
                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                "topology is used. pTessellationState must not be NULL in this case.");
        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                                                               "topology used with patchControlPoints value %u."
                                                                               " patchControlPoints should be >0 and <=32.",
                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
        }
    }
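    // NOTE : The 32 above is the spec-guaranteed minimum for the maxTessellationPatchSize
    //  limit; an exact check would compare against the device's reported limit instead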
    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }
    // Viewport state must be included if rasterization is enabled.
    // If the viewport state is included, the viewport and scissor counts should always match.
    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        if (!pPipeline->graphicsPipelineCI.pViewportState) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
                                                                           "and scissors are dynamic, the PSO must include "
                                                                           "viewportCount and scissorCount in pViewportState.");
        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
        } else {
            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
            if (!dynViewport) {
                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
                                        "vkCmdSetViewport().",
                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
                }
            }
            if (!dynScissor) {
                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
                                        "vkCmdSetScissor().",
                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
                }
            }
        }
    }
    return skipCall;
}

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(layer_data *my_data, const VkPipeline pipeline) {
    PIPELINE_NODE *pPipe = my_data->pipelineMap[pipeline];
    if (pPipe->graphicsPipelineCI.pMultisampleState &&
        (VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pPipe->graphicsPipelineCI.pMultisampleState->sType)) {
        return pPipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

// Validate state related to the PSO
static bool validatePipelineState(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const VkPipelineBindPoint pipelineBindPoint,
                                  const VkPipeline pipeline) {
    bool skipCall = false;
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
        // Verify that any MSAA request in PSO matches sample# in bound FB
        // Skip the check if rasterization is disabled.
        PIPELINE_NODE *pPipeline = my_data->pipelineMap[pipeline];
        if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
            (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
            VkSampleCountFlagBits psoNumSamples = getNumSamples(my_data, pipeline);
            if (pCB->activeRenderPass) {
                const VkRenderPassCreateInfo *pRPCI = my_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
                const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
                VkSampleCountFlagBits subpassNumSamples = (VkSampleCountFlagBits)0;
                uint32_t i;

                const VkPipelineColorBlendStateCreateInfo *pColorBlendState = pPipeline->graphicsPipelineCI.pColorBlendState;
                if ((pColorBlendState != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                    (pColorBlendState->attachmentCount != pSD->colorAttachmentCount)) {
3165                    return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3166                                   reinterpret_cast<const uint64_t &>(pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3167                                    "Render pass subpass %u mismatch: pipeline blend state attachment count %u does not "
3168                                    "match the subpass color attachment count %u. These counts must be equal.",
3169                                   pCB->activeSubpass, pColorBlendState->attachmentCount, pSD->colorAttachmentCount);
3170                }
3171
3172                for (i = 0; i < pSD->colorAttachmentCount; i++) {
3173                    VkSampleCountFlagBits samples;
3174
3175                    if (pSD->pColorAttachments[i].attachment == VK_ATTACHMENT_UNUSED)
3176                        continue;
3177
3178                    samples = pRPCI->pAttachments[pSD->pColorAttachments[i].attachment].samples;
3179                    if (subpassNumSamples == (VkSampleCountFlagBits)0) {
3180                        subpassNumSamples = samples;
3181                    } else if (subpassNumSamples != samples) {
3182                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3183                        break;
3184                    }
3185                }
3186                if (pSD->pDepthStencilAttachment && pSD->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3187                    const VkSampleCountFlagBits samples = pRPCI->pAttachments[pSD->pDepthStencilAttachment->attachment].samples;
3188                    if (subpassNumSamples == (VkSampleCountFlagBits)0)
3189                        subpassNumSamples = samples;
3190                    else if (subpassNumSamples != samples)
3191                        subpassNumSamples = (VkSampleCountFlagBits)-1;
3192                }
3193
3194                if ((pSD->colorAttachmentCount > 0 || pSD->pDepthStencilAttachment) &&
3195                    psoNumSamples != subpassNumSamples) {
3196                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3197                                        (uint64_t)pipeline, __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3198                                        "Sample count mismatch: binding pipeline (%#" PRIxLEAST64
3199                                        ") with %u samples while the current render pass (%#" PRIxLEAST64 ") uses %u samples.",
3200                                        (uint64_t)pipeline, psoNumSamples, (uint64_t)pCB->activeRenderPass, subpassNumSamples);
3201                }
3202            } else {
3203                // TODO : I believe it's an error if we reach this point and don't have an activeRenderPass
3204                //   Verify and flag error as appropriate
3205            }
3206        }
3207        // TODO : Add more checks here
3208    } else {
3209        // TODO : Validate non-gfx pipeline updates
3210    }
3211    return skipCall;
3212}
3213
3214// Block of code at start here specifically for managing/tracking DSs
3215
3216// Return Pool node ptr for specified pool or else NULL
3217static DESCRIPTOR_POOL_NODE *getPoolNode(layer_data *my_data, const VkDescriptorPool pool) {
3218     auto poolIt = my_data->descriptorPoolMap.find(pool);
3219     if (poolIt == my_data->descriptorPoolMap.end())
3220         return NULL;
3221     return poolIt->second;
3222}
3223
3224 // Return false if the update struct is of a valid type; otherwise log an error and return the callback's skip result
3225static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3226    switch (pUpdateStruct->sType) {
3227    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3228    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3229        return false;
3230    default:
3231        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3232                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3233                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3234                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3235    }
3236}
3237
3238 // Return the descriptor count for the given update struct
3239static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3240    switch (pUpdateStruct->sType) {
3241    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3242        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3243    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3244        // TODO : Need to understand this case better and make sure code is correct
3245        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3246    default:
3247        return 0;
3248    }
3249}
3250
3251// For given layout and update, return the first overall index of the layout that is updated
3252static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3253                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3254    return binding_start_index + arrayIndex;
3255}
3256// For given layout and update, return the last overall index of the layout that is updated
3257static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3258                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3259    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3260    return binding_start_index + arrayIndex + count - 1;
3261}
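
// Worked example for the two helpers above: if a binding's descriptors occupy
// global indices [start, start + descriptorCount - 1] in the layout, then an
// update with dstArrayElement = 2 and descriptorCount = 3 covers
//     startIndex = start + 2
//     endIndex   = start + 2 + 3 - 1 = start + 4
// and is rejected later if endIndex exceeds the binding's global end index.
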
3262// Verify that the descriptor type in the update struct matches what's expected by the layout
3263static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3264                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3265    // First get actual type of update
3266    bool skipCall = false;
3267    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3268    switch (pUpdateStruct->sType) {
3269    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3270        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3271        break;
3272    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3273         // Copy updates carry no descriptor type of their own, so there is
3274         // nothing to cross-check against the layout; report no error
3275         return false;
3276    default:
3277        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3278                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3279                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3280                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3281    }
3282    if (!skipCall) {
3283        if (layout_type != actualType) {
3284            skipCall |= log_msg(
3285                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3286                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3287                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3288                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3289        }
3290    }
3291    return skipCall;
3292}
3293
3294// Determine the update type, allocate a new struct of that type, shadow the given pUpdate
3295//   struct into the pNewNode param. Return true if error condition encountered and callback signals early exit.
3296// NOTE : Calls to this function should be wrapped in mutex
3297static bool shadowUpdateNode(layer_data *my_data, const VkDevice device, GENERIC_HEADER *pUpdate, GENERIC_HEADER **pNewNode) {
3298    bool skipCall = false;
3299    VkWriteDescriptorSet *pWDS = NULL;
3300    VkCopyDescriptorSet *pCDS = NULL;
3301    switch (pUpdate->sType) {
3302    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3303        pWDS = new VkWriteDescriptorSet;
3304        *pNewNode = (GENERIC_HEADER *)pWDS;
3305        memcpy(pWDS, pUpdate, sizeof(VkWriteDescriptorSet));
3306
3307        switch (pWDS->descriptorType) {
3308        case VK_DESCRIPTOR_TYPE_SAMPLER:
3309        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3310        case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3311        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
3312            VkDescriptorImageInfo *info = new VkDescriptorImageInfo[pWDS->descriptorCount];
3313            memcpy(info, pWDS->pImageInfo, pWDS->descriptorCount * sizeof(VkDescriptorImageInfo));
3314            pWDS->pImageInfo = info;
3315        } break;
3316        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3317        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3318            VkBufferView *info = new VkBufferView[pWDS->descriptorCount];
3319            memcpy(info, pWDS->pTexelBufferView, pWDS->descriptorCount * sizeof(VkBufferView));
3320            pWDS->pTexelBufferView = info;
3321        } break;
3322        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3323        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3324        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3325        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3326            VkDescriptorBufferInfo *info = new VkDescriptorBufferInfo[pWDS->descriptorCount];
3327            memcpy(info, pWDS->pBufferInfo, pWDS->descriptorCount * sizeof(VkDescriptorBufferInfo));
3328            pWDS->pBufferInfo = info;
3329        } break;
3330         default:
3331             // Unhandled descriptor type: report an error so the caller aborts this update
3332             return true;
3333        }
3334        break;
3335    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3336        pCDS = new VkCopyDescriptorSet;
3337        *pNewNode = (GENERIC_HEADER *)pCDS;
3338        memcpy(pCDS, pUpdate, sizeof(VkCopyDescriptorSet));
3339        break;
3340    default:
3341        if (log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3342                    DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3343                    "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3344                    string_VkStructureType(pUpdate->sType), pUpdate->sType))
3345            return true;
3346    }
3347    // Make sure that pNext for the end of shadow copy is NULL
3348    (*pNewNode)->pNext = NULL;
3349    return skipCall;
3350}
3351
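// Note on the shadowing above: the initial memcpy is shallow, so the
// per-descriptor info arrays are then deep-copied; otherwise the shadow would
// point at caller-owned memory that may be freed once vkUpdateDescriptorSets
// returns. Sketch of what the buffer case performs ('count' stands for
// pWDS->descriptorCount):
//     VkDescriptorBufferInfo *copy = new VkDescriptorBufferInfo[count];
//     memcpy(copy, pWDS->pBufferInfo, count * sizeof(VkDescriptorBufferInfo));
//     pWDS->pBufferInfo = copy; // shadow now owns the array; freed in freeShadowUpdateTree()
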
3352// Verify that given sampler is valid
3353static bool validateSampler(const layer_data *my_data, const VkSampler *pSampler, const bool immutable) {
3354    bool skipCall = false;
3355    auto sampIt = my_data->sampleMap.find(*pSampler);
3356    if (sampIt == my_data->sampleMap.end()) {
3357        if (!immutable) {
3358            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3359                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3360                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid sampler %#" PRIxLEAST64,
3361                                (uint64_t)*pSampler);
3362        } else { // immutable
3363            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3364                                (uint64_t)*pSampler, __LINE__, DRAWSTATE_SAMPLER_DESCRIPTOR_ERROR, "DS",
3365                                "vkUpdateDescriptorSets: Attempt to update descriptor whose binding has an invalid immutable "
3366                                "sampler %#" PRIxLEAST64,
3367                                (uint64_t)*pSampler);
3368        }
3369    } else {
3370        // TODO : Any further checks we want to do on the sampler?
3371    }
3372    return skipCall;
3373}
3374
3375//TODO: Consolidate functions
3376 // TODO : Consolidate the overlapping FindLayout overloads below
3377    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3378    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3379        return false;
3380    }
3381    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3382    imgpair.subresource.aspectMask = aspectMask;
3383    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3384    if (imgsubIt == pCB->imageLayoutMap.end()) {
3385        return false;
3386    }
3387    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3388        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3389                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3390                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3391                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3392    }
3393    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3394        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3395                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3396                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3397                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3398    }
3399    node = imgsubIt->second;
3400    return true;
3401}
3402
3403bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3404    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3405        return false;
3406    }
3407    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3408    imgpair.subresource.aspectMask = aspectMask;
3409    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3410    if (imgsubIt == my_data->imageLayoutMap.end()) {
3411        return false;
3412    }
3413    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3414        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3415                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3416                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3417                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3418    }
3419    layout = imgsubIt->second.layout;
3420    return true;
3421}
3422
3423// find layout(s) on the cmd buf level
3424bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3425    ImageSubresourcePair imgpair = {image, true, range};
3426    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3427    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3428    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3429    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3430    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3431    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3432        imgpair = {image, false, VkImageSubresource()};
3433        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3434        if (imgsubIt == pCB->imageLayoutMap.end())
3435            return false;
3436        node = imgsubIt->second;
3437    }
3438    return true;
3439}
3440
3441// find layout(s) on the global level
3442bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3443    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3444    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3445    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3446    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3447    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3448    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3449        imgpair = {imgpair.image, false, VkImageSubresource()};
3450        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3451        if (imgsubIt == my_data->imageLayoutMap.end())
3452            return false;
3453        layout = imgsubIt->second.layout;
3454    }
3455    return true;
3456}
3457
3458bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3459    ImageSubresourcePair imgpair = {image, true, range};
3460    return FindLayout(my_data, imgpair, layout);
3461}
3462
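// Illustrative query (handles assumed) using the wrapper above to fetch the
// layout recorded globally for a single subresource:
//     VkImageSubresource sub = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mip*/, 0 /*layer*/};
//     VkImageLayout layout;
//     if (FindLayout(dev_data, img, sub, layout)) {
//         // 'layout' now holds the tracked layout for that subresource
//     }
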
3463bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3464    auto sub_data = my_data->imageSubresourceMap.find(image);
3465    if (sub_data == my_data->imageSubresourceMap.end())
3466        return false;
3467    auto imgIt = my_data->imageMap.find(image);
3468    if (imgIt == my_data->imageMap.end())
3469        return false;
3470    bool ignoreGlobal = false;
3471     // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3472     // potential errors in this case.
3473    if (sub_data->second.size() >= (imgIt->second.createInfo.arrayLayers * imgIt->second.createInfo.mipLevels + 1)) {
3474        ignoreGlobal = true;
3475    }
3476    for (auto imgsubpair : sub_data->second) {
3477        if (ignoreGlobal && !imgsubpair.hasSubresource)
3478            continue;
3479        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3480        if (img_data != my_data->imageLayoutMap.end()) {
3481            layouts.push_back(img_data->second.layout);
3482        }
3483    }
3484    return true;
3485}
3486
3487// Set the layout on the global level
3488void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3489    VkImage &image = imgpair.image;
3490    // TODO (mlentine): Maybe set format if new? Not used atm.
3491    my_data->imageLayoutMap[imgpair].layout = layout;
3492    // TODO (mlentine): Maybe make vector a set?
3493    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3494    if (subresource == my_data->imageSubresourceMap[image].end()) {
3495        my_data->imageSubresourceMap[image].push_back(imgpair);
3496    }
3497}
3498
3499// Set the layout on the cmdbuf level
3500void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3501    pCB->imageLayoutMap[imgpair] = node;
3502    // TODO (mlentine): Maybe make vector a set?
3503    auto subresource =
3504        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3505    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3506        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3507    }
3508}
3509
3510void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3511    // TODO (mlentine): Maybe make vector a set?
3512    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3513        pCB->imageSubresourceMap[imgpair.image].end()) {
3514        pCB->imageLayoutMap[imgpair].layout = layout;
3515    } else {
3516        // TODO (mlentine): Could be expensive and might need to be removed.
3517        assert(imgpair.hasSubresource);
3518        IMAGE_CMD_BUF_LAYOUT_NODE node;
3519        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3520            node.initialLayout = layout;
3521        }
3522        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3523    }
3524}
3525
3526template <class OBJECT, class LAYOUT>
3527void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3528    if (imgpair.subresource.aspectMask & aspectMask) {
3529        imgpair.subresource.aspectMask = aspectMask;
3530        SetLayout(pObject, imgpair, layout);
3531    }
3532}
3533
3534template <class OBJECT, class LAYOUT>
3535void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3536    ImageSubresourcePair imgpair = {image, true, range};
3537    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3538    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3539    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3540    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3541}
3542
3543template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3544    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3545     SetLayout(pObject, imgpair, layout);
3546}
3547
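// The templates above fan a single request out across the individual aspect
// bits so depth and stencil planes can be tracked separately. Illustrative
// call (handles assumed) for a combined depth/stencil image:
//     VkImageSubresource sub = {VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, 0, 0};
//     SetLayout(pCB, image, sub, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
// records one DEPTH entry and one STENCIL entry; each per-aspect overload only
// fires when its bit is present in the caller's aspectMask.
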
3548void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3549    auto image_view_data = dev_data->imageViewMap.find(imageView);
3550    assert(image_view_data != dev_data->imageViewMap.end());
3551    const VkImage &image = image_view_data->second.image;
3552    const VkImageSubresourceRange &subRange = image_view_data->second.subresourceRange;
3553    // TODO: Do not iterate over every possibility - consolidate where possible
3554    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3555        uint32_t level = subRange.baseMipLevel + j;
3556        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3557            uint32_t layer = subRange.baseArrayLayer + k;
3558            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3559            SetLayout(pCB, image, sub, layout);
3560        }
3561    }
3562}
3563
3564// Verify that given imageView is valid
3565static bool validateImageView(const layer_data *my_data, const VkImageView *pImageView, const VkImageLayout imageLayout) {
3566    bool skipCall = false;
3567    auto ivIt = my_data->imageViewMap.find(*pImageView);
3568    if (ivIt == my_data->imageViewMap.end()) {
3569        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3570                            (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3571                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid imageView %#" PRIxLEAST64,
3572                            (uint64_t)*pImageView);
3573    } else {
3574        // Validate that imageLayout is compatible with aspectMask and image format
3575        VkImageAspectFlags aspectMask = ivIt->second.subresourceRange.aspectMask;
3576        VkImage image = ivIt->second.image;
3577        // TODO : Check here in case we have a bad image
3578        VkFormat format = VK_FORMAT_MAX_ENUM;
3579        auto imgIt = my_data->imageMap.find(image);
3580        if (imgIt != my_data->imageMap.end()) {
3581            format = (*imgIt).second.createInfo.format;
3582        } else {
3583            // Also need to check the swapchains.
3584            auto swapchainIt = my_data->device_extensions.imageToSwapchainMap.find(image);
3585            if (swapchainIt != my_data->device_extensions.imageToSwapchainMap.end()) {
3586                VkSwapchainKHR swapchain = swapchainIt->second;
3587                auto swapchain_nodeIt = my_data->device_extensions.swapchainMap.find(swapchain);
3588                if (swapchain_nodeIt != my_data->device_extensions.swapchainMap.end()) {
3589                    SWAPCHAIN_NODE *pswapchain_node = swapchain_nodeIt->second;
3590                    format = pswapchain_node->createInfo.imageFormat;
3591                }
3592            }
3593        }
3594        if (format == VK_FORMAT_MAX_ENUM) {
3595            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3596                                (uint64_t)image, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3597                                "vkUpdateDescriptorSets: Attempt to update descriptor with invalid image %#" PRIxLEAST64
3598                                " in imageView %#" PRIxLEAST64,
3599                                (uint64_t)image, (uint64_t)*pImageView);
3600        } else {
3601            bool ds = vk_format_is_depth_or_stencil(format);
3602            switch (imageLayout) {
3603            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
3604                // Only Color bit must be set
3605                if ((aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
3606                    skipCall |=
3607                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3608                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3609                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3610                                "and imageView %#" PRIxLEAST64 ""
3611                                " that does not have VK_IMAGE_ASPECT_COLOR_BIT set.",
3612                                (uint64_t)*pImageView);
3613                }
3614                // format must NOT be DS
3615                if (ds) {
3616                    skipCall |=
3617                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3618                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3619                                "vkUpdateDescriptorSets: Updating descriptor with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL "
3620                                "and imageView %#" PRIxLEAST64 ""
3621                                " but the image format is %s which is not a color format.",
3622                                (uint64_t)*pImageView, string_VkFormat(format));
3623                }
3624                break;
3625            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
3626            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
3627                // Depth or stencil bit must be set, but both must NOT be set
3628                if (aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3629                    if (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3630                         // Both must not be set
3631                        skipCall |=
3632                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3633                                    (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3634                                    "vkUpdateDescriptorSets: Updating descriptor with imageView %#" PRIxLEAST64 ""
3635                                    " that has both STENCIL and DEPTH aspects set",
3636                                    (uint64_t)*pImageView);
3637                    }
3638                } else if (!(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
3639                    // Neither were set
3640                    skipCall |=
3641                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3642                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_INVALID_IMAGE_ASPECT, "DS",
3643                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3644                                " that does not have STENCIL or DEPTH aspect set.",
3645                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView);
3646                }
3647                // format must be DS
3648                if (!ds) {
3649                    skipCall |=
3650                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
3651                                (uint64_t)*pImageView, __LINE__, DRAWSTATE_IMAGEVIEW_DESCRIPTOR_ERROR, "DS",
3652                                "vkUpdateDescriptorSets: Updating descriptor with layout %s and imageView %#" PRIxLEAST64 ""
3653                                " but the image format is %s which is not a depth/stencil format.",
3654                                string_VkImageLayout(imageLayout), (uint64_t)*pImageView, string_VkFormat(format));
3655                }
3656                break;
3657            default:
3658                // anything to check for other layouts?
3659                break;
3660            }
3661        }
3662    }
3663    return skipCall;
3664}
3665
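// Illustrative update the check above rejects (handles assumed): a view of a
// depth format written with a color-attachment layout,
//     VkDescriptorImageInfo info = {};
//     info.imageView   = depthView; // view of, e.g., a VK_FORMAT_D32_SFLOAT image
//     info.imageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
// triggers the "not a color format" error in the COLOR_ATTACHMENT_OPTIMAL case.
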
3666// Verify that given bufferView is valid
3667static bool validateBufferView(const layer_data *my_data, const VkBufferView *pBufferView) {
3668    bool skipCall = false;
3669     auto bufferViewIt = my_data->bufferViewMap.find(*pBufferView);
3670     if (bufferViewIt == my_data->bufferViewMap.end()) {
3671        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
3672                            (uint64_t)*pBufferView, __LINE__, DRAWSTATE_BUFFERVIEW_DESCRIPTOR_ERROR, "DS",
3673                            "vkUpdateDescriptorSets: Attempt to update descriptor with invalid bufferView %#" PRIxLEAST64,
3674                            (uint64_t)*pBufferView);
3675    } else {
3676        // TODO : Any further checks we want to do on the bufferView?
3677    }
3678    return skipCall;
3679}
3680
3681// Verify that given bufferInfo is valid
3682static bool validateBufferInfo(const layer_data *my_data, const VkDescriptorBufferInfo *pBufferInfo) {
3683    bool skipCall = false;
3684     auto bufferIt = my_data->bufferMap.find(pBufferInfo->buffer);
3685     if (bufferIt == my_data->bufferMap.end()) {
3686        skipCall |=
3687            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
3688                    (uint64_t)pBufferInfo->buffer, __LINE__, DRAWSTATE_BUFFERINFO_DESCRIPTOR_ERROR, "DS",
3689                    "vkUpdateDescriptorSets: Attempt to update descriptor where bufferInfo has invalid buffer %#" PRIxLEAST64,
3690                    (uint64_t)pBufferInfo->buffer);
3691    } else {
3692         // TODO : Any further checks we want to do on the buffer?
3693    }
3694    return skipCall;
3695}
3696
3697static bool validateUpdateContents(const layer_data *my_data, const VkWriteDescriptorSet *pWDS,
3698                                   const VkSampler *pImmutableSamplers) {
3699    bool skipCall = false;
3700    // First verify that for the given Descriptor type, the correct DescriptorInfo data is supplied
3701    const VkSampler *pSampler = NULL;
3702    bool immutable = false;
3703    uint32_t i = 0;
3704    // For given update type, verify that update contents are correct
3705    switch (pWDS->descriptorType) {
3706    case VK_DESCRIPTOR_TYPE_SAMPLER:
3707        for (i = 0; i < pWDS->descriptorCount; ++i) {
3708            skipCall |= validateSampler(my_data, &(pWDS->pImageInfo[i].sampler), immutable);
3709        }
3710        break;
3711    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3712        for (i = 0; i < pWDS->descriptorCount; ++i) {
3713            if (NULL == pImmutableSamplers) {
3714                pSampler = &(pWDS->pImageInfo[i].sampler);
3715                if (immutable) {
3716                    skipCall |= log_msg(
3717                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3718                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3719                        "vkUpdateDescriptorSets: Update #%u is not an immutable sampler %#" PRIxLEAST64
3720                        ", but previous update(s) from this "
3721                        "VkWriteDescriptorSet struct used an immutable sampler. All updates from a single struct must either "
3722                        "use immutable or non-immutable samplers.",
3723                        i, (uint64_t)*pSampler);
3724                }
3725            } else {
3726                if (i > 0 && !immutable) {
3727                    skipCall |= log_msg(
3728                        my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT,
3729                        (uint64_t)*pSampler, __LINE__, DRAWSTATE_INCONSISTENT_IMMUTABLE_SAMPLER_UPDATE, "DS",
3730                        "vkUpdateDescriptorSets: Update #%u is an immutable sampler, but previous update(s) from this "
3731                        "VkWriteDescriptorSet struct used a non-immutable sampler. All updates from a single struct must either "
3732                        "use immutable or non-immutable samplers.",
3733                        i);
3734                }
3735                immutable = true;
3736                pSampler = &(pImmutableSamplers[i]);
3737            }
3738            skipCall |= validateSampler(my_data, pSampler, immutable);
3739        }
3740    // Intentionally fall through here to also validate image stuff
3741    case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3742    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3743    case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3744        for (i = 0; i < pWDS->descriptorCount; ++i) {
3745            skipCall |= validateImageView(my_data, &(pWDS->pImageInfo[i].imageView), pWDS->pImageInfo[i].imageLayout);
3746        }
3747        break;
3748    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3749    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3750        for (i = 0; i < pWDS->descriptorCount; ++i) {
3751            skipCall |= validateBufferView(my_data, &(pWDS->pTexelBufferView[i]));
3752        }
3753        break;
3754    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3755    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3756    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3757    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3758        for (i = 0; i < pWDS->descriptorCount; ++i) {
3759            skipCall |= validateBufferInfo(my_data, &(pWDS->pBufferInfo[i]));
3760        }
3761        break;
3762    default:
3763        break;
3764    }
3765    return skipCall;
3766}
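
// Illustrative write that exercises the dispatch above (handles assumed): a
// combined image sampler goes through both validateSampler() and
// validateImageView() for each array element:
//     VkDescriptorImageInfo info = {sampler, view, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
//     VkWriteDescriptorSet w = {};
//     w.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     w.dstSet = set;
//     w.dstBinding = 0;
//     w.descriptorCount = 1;
//     w.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//     w.pImageInfo = &info;
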
3767 // Verify that the given set exists and is not in use by an in-flight command buffer
3768// func_str is the name of the calling function
3769// Return false if no errors occur
3770// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3771 static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, const std::string &func_str) {
3772    bool skip_call = false;
3773    auto set_node = my_data->setMap.find(set);
3774    if (set_node == my_data->setMap.end()) {
3775        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3776                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3777                             "Cannot call %s() on descriptor set %" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3778                             (uint64_t)(set));
3779    } else {
3780        if (set_node->second->in_use.load()) {
3781            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3782                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3783                                 "DS", "Cannot call %s() on descriptor set %" PRIxLEAST64 " that is in use by a command buffer.",
3784                                 func_str.c_str(), (uint64_t)(set));
3785        }
3786    }
3787    return skip_call;
3788}
3789static void invalidateBoundCmdBuffers(layer_data *dev_data, const SET_NODE *pSet) {
3790    // Flag any CBs this set is bound to as INVALID
3791    for (auto cb : pSet->boundCmdBuffers) {
3792        auto cb_node = dev_data->commandBufferMap.find(cb);
3793        if (cb_node != dev_data->commandBufferMap.end()) {
3794            cb_node->second->state = CB_INVALID;
3795        }
3796    }
3797}
3798// update DS mappings based on write and copy update arrays
3799static bool dsUpdate(layer_data *my_data, VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pWDS,
3800                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pCDS) {
3801    bool skipCall = false;
3802    // Validate Write updates
3803    uint32_t i = 0;
3804    for (i = 0; i < descriptorWriteCount; i++) {
3805        VkDescriptorSet ds = pWDS[i].dstSet;
3806        SET_NODE *pSet = my_data->setMap[ds];
3807        // Set being updated cannot be in-flight
3808         if ((skipCall = validateIdleDescriptorSet(my_data, ds, "vkUpdateDescriptorSets")) == true)
3809            return skipCall;
3810        // If set is bound to any cmdBuffers, mark them invalid
3811        invalidateBoundCmdBuffers(my_data, pSet);
3812        GENERIC_HEADER *pUpdate = (GENERIC_HEADER *)&pWDS[i];
3813        auto layout_node = pSet->p_layout;
3814        // First verify valid update struct
3815        if ((skipCall = validUpdateStruct(my_data, device, pUpdate)) == true) {
3816            break;
3817        }
3818         uint32_t binding = pWDS[i].dstBinding;
3819         uint32_t endIndex = 0;
3820        // Make sure that layout being updated has the binding being updated
3821        if (!layout_node->HasBinding(binding)) {
3822            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3823                                (uint64_t)(ds), __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3824                                 "Descriptor Set %" PRIu64 " does not have a binding matching "
3825                                "update binding %u for update type "
3826                                "%s!",
3827                                (uint64_t)(ds), binding, string_VkStructureType(pUpdate->sType));
3828        } else {
3829            // Next verify that update falls within size of given binding
3830            endIndex = getUpdateEndIndex(my_data, device, layout_node->GetGlobalStartIndexFromBinding(binding),
3831                                         pWDS[i].dstArrayElement, pUpdate);
3832            if (layout_node->GetGlobalEndIndexFromBinding(binding) < endIndex) {
3833                auto ds_layout = layout_node->GetDescriptorSetLayout();
3834                skipCall |=
3835                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3836                            reinterpret_cast<uint64_t &>(ds), __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3837                            "Descriptor update type of %s is out of bounds for matching binding %u in Layout %" PRIu64 "!",
3838                            string_VkStructureType(pUpdate->sType), binding, reinterpret_cast<uint64_t &>(ds_layout));
3839            } else { // TODO : should we skip update on a type mismatch or force it?
3840                uint32_t startIndex;
3841                startIndex = getUpdateStartIndex(my_data, device, layout_node->GetGlobalStartIndexFromBinding(binding),
3842                                                 pWDS[i].dstArrayElement, pUpdate);
3843                const auto & layout_binding = layout_node->GetDescriptorSetLayoutBindingPtrFromBinding(binding);
3844                // Layout bindings match w/ update, now verify that update type & stageFlags are the same for entire update
3845                if ((skipCall = validateUpdateConsistency(my_data, device, layout_binding->descriptorType, pUpdate, startIndex,
3846                                                          endIndex)) == false) {
3847                    // The update is within bounds and consistent, but need to
3848                    // make sure contents make sense as well
3849                    if ((skipCall = validateUpdateContents(my_data, &pWDS[i], layout_binding->pImmutableSamplers)) == false) {
3850                        // Update is good. Save the update info
3851                        // Create new update struct for this set's shadow copy
3852                        GENERIC_HEADER *pNewNode = NULL;
3853                        skipCall |= shadowUpdateNode(my_data, device, pUpdate, &pNewNode);
3854                        if (NULL == pNewNode) {
3855                            skipCall |= log_msg(
3856                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3857                                (uint64_t)(ds), __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
3858                                "Out of memory while attempting to allocate UPDATE struct in vkUpdateDescriptors()");
3859                        } else {
3860                            // Insert shadow node into LL of updates for this set
3861                            pNewNode->pNext = pSet->pUpdateStructs;
3862                            pSet->pUpdateStructs = pNewNode;
3863                            // Now update appropriate descriptor(s) to point to new Update node
3864                            for (uint32_t j = startIndex; j <= endIndex; j++) {
3865                                assert(j < pSet->descriptorCount);
3866                                pSet->pDescriptorUpdates[j] = pNewNode;
3867                            }
3868                        }
3869                    }
3870                }
3871            }
3872        }
3873    }
3874    // Now validate copy updates
3875    for (i = 0; i < descriptorCopyCount; ++i) {
3876        SET_NODE *pSrcSet = NULL, *pDstSet = NULL;
3877        uint32_t srcStartIndex = 0, srcEndIndex = 0, dstStartIndex = 0, dstEndIndex = 0;
3878        // For each copy make sure that update falls within given layout and that types match
3879        pSrcSet = my_data->setMap[pCDS[i].srcSet];
3880        pDstSet = my_data->setMap[pCDS[i].dstSet];
3881        // Set being updated cannot be in-flight
3882         if ((skipCall = validateIdleDescriptorSet(my_data, pDstSet->set, "vkUpdateDescriptorSets")) == true)
3883            return skipCall;
3884        invalidateBoundCmdBuffers(my_data, pDstSet);
3885        auto src_layout_node = pSrcSet->p_layout;
3886        auto dst_layout_node = pDstSet->p_layout;
3887        // Validate that src binding is valid for src set layout
3888        if (!src_layout_node->HasBinding(pCDS[i].srcBinding)) {
3889            auto s_layout = src_layout_node->GetDescriptorSetLayout();
3890            skipCall |=
3891                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3892                        (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3893                        "Copy descriptor update %u has srcBinding %u "
3894                        "which is out of bounds for underlying SetLayout "
3895                        "%#" PRIxLEAST64 " which only has bindings 0-%u.",
3896                        i, pCDS[i].srcBinding, reinterpret_cast<uint64_t &>(s_layout), src_layout_node->GetBindingCount() - 1);
3897        } else if (!dst_layout_node->HasBinding(pCDS[i].dstBinding)) {
3898            auto d_layout = dst_layout_node->GetDescriptorSetLayout();
3899            skipCall |=
3900                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3901                        (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_INVALID_UPDATE_INDEX, "DS",
3902                        "Copy descriptor update %u has dstBinding %u "
3903                        "which is out of bounds for underlying SetLayout "
3904                        "%#" PRIxLEAST64 " which only has bindings 0-%u.",
3905                        i, pCDS[i].dstBinding, reinterpret_cast<uint64_t &>(d_layout), dst_layout_node->GetBindingCount() - 1);
3906        } else {
3907            // Proceed with validation. Bindings are ok, but make sure update is within bounds of given layout and binding
3908            srcEndIndex = getUpdateEndIndex(my_data, device, src_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].srcBinding),
3909                                            pCDS[i].srcArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3910            dstEndIndex = getUpdateEndIndex(my_data, device, dst_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].dstBinding),
3911                                            pCDS[i].dstArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3912            if (src_layout_node->GetGlobalEndIndexFromBinding(pCDS[i].srcBinding) < srcEndIndex) {
3913                auto s_layout = src_layout_node->GetDescriptorSetLayout();
3914                skipCall |=
3915                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3916                            (uint64_t)pSrcSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3917                            "Copy descriptor src update is out of bounds for matching binding %u in Layout %" PRIu64 "!",
3918                            pCDS[i].srcBinding, reinterpret_cast<uint64_t &>(s_layout));
3919            } else if (dst_layout_node->GetGlobalEndIndexFromBinding(pCDS[i].dstBinding) < dstEndIndex) {
3920                auto d_layout = dst_layout_node->GetDescriptorSetLayout();
3921                skipCall |=
3922                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3923                            (uint64_t)pDstSet->set, __LINE__, DRAWSTATE_DESCRIPTOR_UPDATE_OUT_OF_BOUNDS, "DS",
3924                            "Copy descriptor dest update is out of bounds for matching binding %u in Layout %" PRIu64 "!",
3925                            pCDS[i].dstBinding, reinterpret_cast<uint64_t &>(d_layout));
3926            } else {
3927                srcStartIndex =
3928                    getUpdateStartIndex(my_data, device, src_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].srcBinding),
3929                                        pCDS[i].srcArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3930                dstStartIndex =
3931                    getUpdateStartIndex(my_data, device, dst_layout_node->GetGlobalStartIndexFromBinding(pCDS[i].dstBinding),
3932                                        pCDS[i].dstArrayElement, (const GENERIC_HEADER *)&(pCDS[i]));
3933                auto s_binding = src_layout_node->GetDescriptorSetLayoutBindingPtrFromBinding(pCDS[i].srcBinding);
3934                auto d_binding = dst_layout_node->GetDescriptorSetLayoutBindingPtrFromBinding(pCDS[i].dstBinding);
3935                // For copy, just make sure types match and then perform update
3936                if (s_binding->descriptorType != d_binding->descriptorType) {
3937                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3938                                        __LINE__, DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3939                                         "Copy descriptor update index %u has src descriptor type %s "
3940                                         "that does not match the overlapping dst descriptor type %s!",
3941                                        i, string_VkDescriptorType(s_binding->descriptorType),
3942                                        string_VkDescriptorType(d_binding->descriptorType));
3943                } else {
3944                    for (uint32_t j = 0; j < pCDS[i].descriptorCount; ++j) {
3945                        // point dst descriptor at corresponding src descriptor
3946                        // TODO : This may be a hole. I believe copy should be its own copy,
3947                        //  otherwise a subsequent write update to src will incorrectly affect the copy
3948                        pDstSet->pDescriptorUpdates[j + dstStartIndex] = pSrcSet->pDescriptorUpdates[j + srcStartIndex];
3949                        pDstSet->pUpdateStructs = pSrcSet->pUpdateStructs;
3950                    }
3951                }
3952            }
3953        }
3954    }
3955    return skipCall;
3956}
3957
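// Illustrative entry into dsUpdate() (handles assumed): one write and one copy,
//     vkUpdateDescriptorSets(device, 1, &w, 1, &c);
// Write updates are bounds-, type-, and content-checked against the set layout
// before a shadow node is linked in; copy updates only require that the source
// and destination bindings exist, fit, and share a descriptor type.
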
3958// Verify that given pool has descriptors that are being requested for allocation.
3959// NOTE : Calls to this function should be wrapped in mutex
3960static bool validate_descriptor_availability_in_pool(layer_data *dev_data, DESCRIPTOR_POOL_NODE *pPoolNode, uint32_t count,
3961                                                     const VkDescriptorSetLayout *pSetLayouts) {
3962    bool skipCall = false;
3963    uint32_t i = 0;
3964    uint32_t j = 0;
3965
3966    // Track number of descriptorSets allowable in this pool
3967    if (pPoolNode->availableSets < count) {
3968        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3969                            reinterpret_cast<uint64_t &>(pPoolNode->pool), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
3970                            "Unable to allocate %u descriptorSets from pool %#" PRIxLEAST64
3971                             ". This pool only has %u descriptorSets remaining.",
3972                            count, reinterpret_cast<uint64_t &>(pPoolNode->pool), pPoolNode->availableSets);
3973    } else {
3974        pPoolNode->availableSets -= count;
3975    }
3976
3977    for (i = 0; i < count; ++i) {
3978        auto layout_pair = dev_data->descriptorSetLayoutMap.find(pSetLayouts[i]);
3979        if (layout_pair == dev_data->descriptorSetLayoutMap.end()) {
3980            skipCall |=
3981                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
3982                        (uint64_t)pSetLayouts[i], __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3983                        "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
3984                        (uint64_t)pSetLayouts[i]);
3985        } else {
3986            uint32_t typeIndex = 0, poolSizeCount = 0;
3987            auto &layout_node = layout_pair->second;
3988            for (j = 0; j < layout_node->GetBindingCount(); ++j) {
3989                const auto &binding_layout = layout_node->GetDescriptorSetLayoutBindingPtrFromIndex(j);
3990                typeIndex = static_cast<uint32_t>(binding_layout->descriptorType);
3991                poolSizeCount = binding_layout->descriptorCount;
3992                if (poolSizeCount > pPoolNode->availableDescriptorTypeCount[typeIndex]) {
3993                    skipCall |= log_msg(
3994                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT,
3995                        reinterpret_cast<const uint64_t &>(pSetLayouts[i]), __LINE__, DRAWSTATE_DESCRIPTOR_POOL_EMPTY, "DS",
3996                        "Unable to allocate %u descriptors of type %s from pool %#" PRIxLEAST64
3997                         ". This pool only has %u descriptors of this type remaining.",
3998                        poolSizeCount, string_VkDescriptorType(binding_layout->descriptorType), (uint64_t)pPoolNode->pool,
3999                        pPoolNode->availableDescriptorTypeCount[typeIndex]);
4000                } else { // Decrement available descriptors of this type
4001                    pPoolNode->availableDescriptorTypeCount[typeIndex] -= poolSizeCount;
4002                }
4003            }
4004        }
4005    }
4006    return skipCall;
4007}
4008
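// Worked example for the accounting above: a pool created with maxSets = 2 and
// a single VkDescriptorPoolSize of {VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 4} can
// serve two vkAllocateDescriptorSets() calls whose layouts together consume at
// most four UNIFORM_BUFFER descriptors; requesting a third set, or a fifth
// uniform-buffer descriptor, trips DRAWSTATE_DESCRIPTOR_POOL_EMPTY.
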
4009// Free the shadowed update node for this Set
4010// NOTE : Calls to this function should be wrapped in mutex
4011static void freeShadowUpdateTree(SET_NODE *pSet) {
4012    GENERIC_HEADER *pShadowUpdate = pSet->pUpdateStructs;
4013    pSet->pUpdateStructs = NULL;
4014    GENERIC_HEADER *pFreeUpdate = pShadowUpdate;
4015    // Clear the descriptor mappings as they will now be invalid
4016    pSet->pDescriptorUpdates.clear();
4017    while (pShadowUpdate) {
4018        pFreeUpdate = pShadowUpdate;
4019        pShadowUpdate = (GENERIC_HEADER *)pShadowUpdate->pNext;
4020        VkWriteDescriptorSet *pWDS = NULL;
4021        switch (pFreeUpdate->sType) {
4022        case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
4023            pWDS = (VkWriteDescriptorSet *)pFreeUpdate;
4024            switch (pWDS->descriptorType) {
4025            case VK_DESCRIPTOR_TYPE_SAMPLER:
4026            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
4027            case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
4028            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
4029                delete[] pWDS->pImageInfo;
4030            } break;
4031            case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
4032            case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
4033                delete[] pWDS->pTexelBufferView;
4034            } break;
4035            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
4036            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
4037            case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
4038            case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
4039                delete[] pWDS->pBufferInfo;
4040            } break;
4041            default:
4042                break;
4043            }
4044            break;
4045        case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
4046            break;
4047        default:
4048            assert(0);
4049            break;
4050        }
4051        delete pFreeUpdate;
4052    }
4053}
4054
4055// Free all DS Pools including their Sets & related sub-structs
4056// NOTE : Calls to this function should be wrapped in mutex
4057static void deletePools(layer_data *my_data) {
4058     if (my_data->descriptorPoolMap.empty())
4059        return;
4060    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
4061        SET_NODE *pSet = (*ii).second->pSets;
4062        SET_NODE *pFreeSet = pSet;
4063        while (pSet) {
4064            pFreeSet = pSet;
4065            pSet = pSet->pNext;
4066            // Free Update shadow struct tree
4067            freeShadowUpdateTree(pFreeSet);
4068            delete pFreeSet;
4069        }
4070        delete (*ii).second;
4071    }
4072    my_data->descriptorPoolMap.clear();
4073}
4074
4075// Currently clearing a set is removing all previous updates to that set
4076 // Clearing a set currently means removing all previous updates recorded for that set
4077static void clearDescriptorSet(layer_data *my_data, VkDescriptorSet set) {
4078    SET_NODE *pSet = getSetNode(my_data, set);
4079    if (!pSet) {
4080        // TODO : Return error
4081    } else {
4082        freeShadowUpdateTree(pSet);
4083    }
4084}
4085
4086static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
4087                                VkDescriptorPoolResetFlags flags) {
4088    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
4089    if (!pPool) {
4090        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
4091                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
4092                "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
4093    } else {
4094        // TODO: validate flags
4095        // For every set off of this pool, clear it, remove from setMap, and free SET_NODE
4096        SET_NODE *pSet = pPool->pSets;
4097        SET_NODE *pFreeSet = pSet;
4098        while (pSet) {
4099            clearDescriptorSet(my_data, pSet->set);
4100            my_data->setMap.erase(pSet->set);
4101            pFreeSet = pSet;
4102            pSet = pSet->pNext;
4103            delete pFreeSet;
4104        }
4105        pPool->pSets = nullptr;
4106        // Reset available count for each type and available sets for this pool
4107        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
4108            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
4109        }
4110        pPool->availableSets = pPool->maxSets;
4111    }
4112}

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data *my_data, const VkCommandBuffer cb) {
    if (my_data->commandBufferMap.count(cb) == 0) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
        return NULL;
    }
    return my_data->commandBufferMap[cb];
}

// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && cmd_type != CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
    return false;
}

// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
//  in the recording state or if there's an issue with the Cmd ordering
static bool addCmd(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skipCall = false;
    auto pool_data = my_data->commandPoolMap.find(pCB->createInfo.commandPool);
    if (pool_data != my_data->commandPoolMap.end()) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pool_data->second.queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
        CMD_NODE cmdNode = {};
        // init cmd node and append to end of cmd LL
        cmdNode.cmdNumber = ++pCB->numCmds;
        cmdNode.type = cmd;
        pCB->cmds.push_back(cmdNode);
    }
    return skipCall;
}
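// Illustrative call site (a sketch of the pattern the layer's vkCmd* entry
// points follow; CMD_DRAW and the "vkCmdDraw()" caller string are examples):
//     GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
//     if (pCB)
//         skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");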
// Reset the command buffer state
//  Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->cmds.clear();
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewports.clear();
        pCB->scissors.clear();

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
                auto set_node = dev_data->setMap.find(set);
                if (set_node != dev_data->setMap.end()) {
                    set_node->second->boundCmdBuffers.erase(pCB->commandBuffer);
                }
            }
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = 0;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->lastSubmittedFence = VK_NULL_HANDLE;
        pCB->lastSubmittedQueue = VK_NULL_HANDLE;
        pCB->destroyedSets.clear();
        pCB->updatedSets.clear();
        pCB->destroyedFramebuffers.clear();
        pCB->waitedEvents.clear();
        pCB->semaphores.clear();
        pCB->events.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();

        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fbNode = dev_data->frameBufferMap.find(framebuffer);
            if (fbNode != dev_data->frameBufferMap.end()) {
                fbNode->second.referencingCmdBuffers.erase(pCB->commandBuffer);
            }
        }
        pCB->framebuffers.clear();
    }
}

// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status = CBSTATUS_ALL;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
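        // Worked example: a PSO listing VK_DYNAMIC_STATE_VIEWPORT and
        // VK_DYNAMIC_STATE_SCISSOR as dynamic yields
        //     psoDynStateMask = CBSTATUS_ALL & ~(CBSTATUS_VIEWPORT_SET | CBSTATUS_SCISSOR_SET)
        // so the CB is credited with every statically-baked state, but must still
        // see explicit vkCmdSetViewport()/vkCmdSetScissor() calls before drawing.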
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_VIEWPORT:
                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
                break;
            case VK_DYNAMIC_STATE_SCISSOR:
                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
                break;
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}

// Print the last bound Gfx Pipeline
static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
    bool skipCall = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB) {
        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
        if (!pPipeTrav) {
            // nothing to print
        } else {
            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
                                vk_print_vkgraphicspipelinecreateinfo(
                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
                                    .c_str());
        }
    }
    return skipCall;
}

static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
    if (pCB && pCB->cmds.size() > 0) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_NONE, "DS", "Cmds in CB %p", (void *)cb);
        vector<CMD_NODE> cmds = pCB->cmds;
        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
            // TODO : Need to pass cb as srcObj here
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD#%" PRIu64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
        }
    } else {
        // Nothing to print
    }
}

static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
    bool skipCall = false;
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return skipCall;
    }
    skipCall |= printPipeline(my_data, cb);
    return skipCall;
}

// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (%#" PRIxLEAST64 ")", apiName,
                         (uint64_t)pCB->activeRenderPass);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
                          "%s: This call must be issued inside an active render pass.", apiName);
    }
    return outside;
}

static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data =
        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                     pCreateInfo->ppEnabledExtensionNames);

    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

/* hook DestroyInstance to remove tableInstanceMap entry */
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
    pTable->DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (my_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
        my_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(my_data->report_data);
    delete my_data->instance_dispatch_table;
    layer_data_map.erase(key);
}

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    uint32_t i;
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;

    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    // Setup device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
    uint32_t count;
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device mem limits into device layer_data struct
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// prototype
static void deleteRenderPasses(layer_data *);
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    std::unique_lock<std::mutex> lock(global_lock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    deletePools(dev_data);
    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
        delete del_layout.second;
    }
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    lock.unlock();
#if MTMERGESOURCE
    bool skipCall = false;
    lock.lock();
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data);
    printCBList(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (!dev_data->memObjMap.empty()) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = &(*ii).second;
            if (pInfo->allocInfo.allocationSize != 0) {
                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
                            "MEM", "Mem Object %" PRIu64 " has not been freed. You should clean up this memory by calling "
                                   "vkFreeMemory(%" PRIu64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: %p, key: %p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    if (!skipCall) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(ARRAY_SIZE(cv_global_layers), cv_global_layers, pCount, pProperties);
}

// TODO: Why does this exist - can we just use global?
static const VkLayerProperties cv_device_layers[] = {{
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
}};

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName == NULL) {
        dispatch_key key = get_dispatch_key(physicalDevice);
        layer_data *my_data = get_my_data_ptr(key, layer_data_map);
        return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
    } else {
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
    }
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    /* draw_state physical device layers are the same as global */
    return util_GetLayerProperties(ARRAY_SIZE(cv_device_layers), cv_device_layers, pCount, pProperties);
}

// Validate that the initial layout specified in the command buffer for the IMAGE
// is the same as the global IMAGE layout
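// Example of what this catches (illustrative): a command buffer recorded while
// an image was in VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL is flagged at submit
// time if the image's tracked global layout has since changed (e.g. to
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL).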
static bool ValidateCmdBufImageLayouts(VkCommandBuffer cmdBuffer) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image %" PRIu64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (%" PRIx64 ") [sub-resource: array layer %u, mip level %u], "
                        "with layout %s when first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.arrayLayer,
                        cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(cmdBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (%" PRIx64 ") with layout %s when "
                        "first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                }
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}

// Track which resources are in-flight by atomically incrementing their "in_use" count
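// The fetch_add(1) calls here are paired with the fetch_sub(1) calls in
// decrementResources() below, which run once a fence (or queue/device wait)
// proves the GPU is finished with the submission that referenced the resource.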
static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data == my_data->bufferMap.end()) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer %" PRIu64 ".", (uint64_t)(buffer));
            } else {
                buffer_data->second.in_use.fetch_add(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode == my_data->setMap.end()) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
                            "Cannot submit cmd buffer using deleted descriptor set %" PRIu64 ".", (uint64_t)(set));
            } else {
                setNode->second->in_use.fetch_add(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode == my_data->semaphoreMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                        reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
                        "Cannot submit cmd buffer using deleted semaphore %" PRIu64 ".", reinterpret_cast<uint64_t &>(semaphore));
        } else {
            semaphoreNode->second.in_use.fetch_add(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode == my_data->eventMap.end()) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                        "Cannot submit cmd buffer using deleted event %" PRIu64 ".", reinterpret_cast<uint64_t &>(event));
        } else {
            eventNode->second.in_use.fetch_add(1);
        }
    }
    return skip_call;
}

// Note: This function assumes that the global lock is held by the calling
// thread.
static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    if (pCB) {
        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
            for (auto event : queryEventsPair.second) {
                if (my_data->eventMap[event].needsSignaled) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool %" PRIu64
                                         " with index %d which was guarded by unsignaled event %" PRIu64 ".",
                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                }
            }
        }
    }
    return skip_call;
}
// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    pCB->in_use.fetch_sub(1);
    if (!pCB->in_use.load()) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

static void decrementResources(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    for (auto drawDataElement : pCB->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_data = my_data->bufferMap.find(buffer);
            if (buffer_data != my_data->bufferMap.end()) {
                buffer_data->second.in_use.fetch_sub(1);
            }
        }
    }
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
            auto setNode = my_data->setMap.find(set);
            if (setNode != my_data->setMap.end()) {
                setNode->second->in_use.fetch_sub(1);
            }
        }
    }
    for (auto semaphore : pCB->semaphores) {
        auto semaphoreNode = my_data->semaphoreMap.find(semaphore);
        if (semaphoreNode != my_data->semaphoreMap.end()) {
            semaphoreNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto event : pCB->events) {
        auto eventNode = my_data->eventMap.find(event);
        if (eventNode != my_data->eventMap.end()) {
            eventNode->second.in_use.fetch_sub(1);
        }
    }
    for (auto queryStatePair : pCB->queryToStateMap) {
        my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
    }
    for (auto eventStagePair : pCB->eventToStageMap) {
        my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
    }
}
// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
//  decrementResources for all priorFences and cmdBuffers associated with fence.
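//  Note: the recursion through priorFences terminates because needsSignaled is
//  cleared before recursing, so a fence that was already retired returns early
//  on re-entry.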
static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
    bool skip_call = false;
    std::vector<VkFence> fences;
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto fence_data = my_data->fenceMap.find(pFences[i]);
        if (fence_data == my_data->fenceMap.end() || !fence_data->second.needsSignaled)
            return skip_call;
        fence_data->second.needsSignaled = false;
        if (fence_data->second.in_use.load()) {
            fences.push_back(pFences[i]);
            fence_data->second.in_use.fetch_sub(1);
        }
        decrementResources(my_data, static_cast<uint32_t>(fence_data->second.priorFences.size()),
                           fence_data->second.priorFences.data());
        for (auto cmdBuffer : fence_data->second.cmdBuffers) {
            decrementResources(my_data, cmdBuffer);
            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
            removeInFlightCmdBuffer(my_data, cmdBuffer);
        }
    }
    for (auto fence : fences) {
        for (auto queue_data : my_data->queueMap) {
            auto last_fence_data = std::find(queue_data.second.lastFences.begin(), queue_data.second.lastFences.end(), fence);
            if (last_fence_data != queue_data.second.lastFences.end()) {
                queue_data.second.lastFences.erase(last_fence_data);
                break;
            }
        }
    }
    return skip_call;
}
// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
static bool decrementResources(layer_data *my_data, VkQueue queue) {
    bool skip_call = false;
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (auto cmdBuffer : queue_data->second.untrackedCmdBuffers) {
            decrementResources(my_data, cmdBuffer);
            skip_call |= cleanInFlightCmdBuffer(my_data, cmdBuffer);
            removeInFlightCmdBuffer(my_data, cmdBuffer);
        }
        queue_data->second.untrackedCmdBuffers.clear();
        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
                                        queue_data->second.lastFences.data());
    }
    return skip_call;
}

// This function merges command buffer tracking between queues when there is a semaphore dependency
// between them (see below for details as to how tracking works). When this happens, the prior
// fences and any untracked command buffers from the signaling queue are merged into the waiting
// queue's tracking.
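// Illustrative scenario: queue A submits CB1 and signals semaphore S; queue B
// then submits a batch that waits on S. Calling this with queue == B and
// other_queue == A migrates A's lastFences and untracked CBs into B's
// bookkeeping, so a later wait on B's fence also retires CB1.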
static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
    if (queue == other_queue) {
        return;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    auto other_queue_data = dev_data->queueMap.find(other_queue);
    if (queue_data == dev_data->queueMap.end() || other_queue_data == dev_data->queueMap.end()) {
        return;
    }
    for (auto fenceInner : other_queue_data->second.lastFences) {
        queue_data->second.lastFences.push_back(fenceInner);
    }
    if (fence != VK_NULL_HANDLE) {
        auto fence_data = dev_data->fenceMap.find(fence);
        if (fence_data == dev_data->fenceMap.end()) {
            return;
        }
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            fence_data->second.cmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    } else {
        for (auto cmdbuffer : other_queue_data->second.untrackedCmdBuffers) {
            queue_data->second.untrackedCmdBuffers.push_back(cmdbuffer);
        }
        other_queue_data->second.untrackedCmdBuffers.clear();
    }
    for (auto eventStagePair : other_queue_data->second.eventToStageMap) {
        queue_data->second.eventToStageMap[eventStagePair.first] = eventStagePair.second;
    }
}

// This is the core function for tracking command buffers. There are two primary ways command
// buffers are tracked. When submitted, they are stored in the command buffer list associated
// with a fence, or in the untracked command buffer list associated with a queue if no fence is used.
// Each queue also stores the last fence that was submitted onto the queue. This allows us to
// create a linked list of fences and their associated command buffers, so if one fence is
// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
// waited on (either via a queue, device, or fence), we free the cmd buffers for that fence and
// recursively call with the prior fences.
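// Illustrative timeline: submit(CB1, F1) then submit(CB2, F2) on the same
// queue leaves lastFences == {F2} with F2.priorFences == {F1}; waiting on F2
// therefore retires CB2, recurses into F1, and retires CB1 as well.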
static void trackCommandBuffers(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (fence != VK_NULL_HANDLE) {
        vector<VkFence> prior_fences;
        auto fence_data = my_data->fenceMap.find(fence);
        if (fence_data == my_data->fenceMap.end()) {
            return;
        }
        fence_data->second.cmdBuffers.clear();
        if (queue_data != my_data->queueMap.end()) {
            prior_fences = queue_data->second.lastFences;
            queue_data->second.lastFences.clear();
            queue_data->second.lastFences.push_back(fence);
            for (auto cmdbuffer : queue_data->second.untrackedCmdBuffers) {
                fence_data->second.cmdBuffers.push_back(cmdbuffer);
            }
            queue_data->second.untrackedCmdBuffers.clear();
        }
        fence_data->second.priorFences = prior_fences;
        fence_data->second.needsSignaled = true;
        fence_data->second.queue = queue;
        fence_data->second.in_use.fetch_add(1);
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    fence_data->second.cmdBuffers.push_back(secondaryCmdBuffer);
                }
                fence_data->second.cmdBuffers.push_back(submit->pCommandBuffers[i]);
            }
        }
    } else {
        if (queue_data != my_data->queueMap.end()) {
            for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
                const VkSubmitInfo *submit = &pSubmits[submit_idx];
                for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                    for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                        queue_data->second.untrackedCmdBuffers.push_back(secondaryCmdBuffer);
                    }
                    queue_data->second.untrackedCmdBuffers.push_back(submit->pCommandBuffers[i]);
                }
            }
        }
    }
}

static void markCommandBuffersInFlight(layer_data *my_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
                                       VkFence fence) {
    auto queue_data = my_data->queueMap.find(queue);
    if (queue_data != my_data->queueMap.end()) {
        for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
            const VkSubmitInfo *submit = &pSubmits[submit_idx];
            for (uint32_t i = 0; i < submit->commandBufferCount; ++i) {
                // Add cmdBuffers to the global set and increment count
                GLOBAL_CB_NODE *pCB = getCBNode(my_data, submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : my_data->commandBufferMap[submit->pCommandBuffers[i]]->secondaryCommandBuffers) {
                    my_data->globalInFlightCmdBuffers.insert(secondaryCmdBuffer);
                    GLOBAL_CB_NODE *pSubCB = getCBNode(my_data, secondaryCmdBuffer);
                    pSubCB->in_use.fetch_add(1);
                }
                my_data->globalInFlightCmdBuffers.insert(submit->pCommandBuffers[i]);
                pCB->in_use.fetch_add(1);
            }
        }
    }
}

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                    "Command Buffer %#" PRIx64 " is already in use and is not marked for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skipCall = false;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB %#" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted %#" PRIxLEAST64 " times.",
                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            bool causeReported = false;
            if (!pCB->destroyedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->destroyedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) destroyed: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->updatedSets.empty()) {
                std::stringstream set_string;
                for (auto set : pCB->updatedSets)
                    set_string << " " << set;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64
                            " that is invalid because it had the following bound descriptor set(s) updated: %s",
                            (uint64_t)(pCB->commandBuffer), set_string.str().c_str());
                causeReported = true;
            }
            if (!pCB->destroyedFramebuffers.empty()) {
                std::stringstream fb_string;
                for (auto fb : pCB->destroyedFramebuffers)
                    fb_string << " " << fb;

                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer %#" PRIxLEAST64 " that is invalid because it had the following "
                            "referenced framebuffers destroyed: %s",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), fb_string.str().c_str());
                causeReported = true;
            }
            // TODO : This is defensive programming to make sure an error is
            //  flagged if we hit this INVALID cmd buffer case and none of the
            //  above cases are hit. As the number of INVALID cases grows, this
            //  code should be updated to seamlessly handle all the cases.
            if (!causeReported) {
                skipCall |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                    "You are submitting command buffer %#" PRIxLEAST64 " that is invalid due to an unknown cause. Validation "
                    "should be improved to report the exact cause.",
                    reinterpret_cast<uint64_t &>(pCB->commandBuffer));
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skipCall |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB %#" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skipCall;
}

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skipCall = validateAndIncrementResources(dev_data, pCB);
    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            skipCall |= validateAndIncrementResources(dev_data, dev_data->commandBufferMap[secondaryCmdBuffer]);
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            if (pSubCB->primaryCommandBuffer != pCB->commandBuffer) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB %#" PRIxLEAST64 " was submitted with secondary buffer %#" PRIxLEAST64
                            " but that buffer has subsequently been bound to "
                            "primary cmd buffer %#" PRIxLEAST64 ".",
                            reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                            reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }
    skipCall |= validateCommandBufferState(dev_data, pCB);
    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
    // on device
    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
    return skipCall;
}
5129
5130VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5131vkQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5132    bool skipCall = false;
5133    GLOBAL_CB_NODE *pCBNode = NULL;
5134    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5135    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5136    std::unique_lock<std::mutex> lock(global_lock);
5137    // First verify that fence is not in use
5138    if (fence != VK_NULL_HANDLE) {
5139        dev_data->fenceMap[fence].queue = queue;
5140        if ((submitCount != 0) && dev_data->fenceMap[fence].in_use.load()) {
5141            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5142                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5143                                "Fence %#" PRIx64 " is already in use by another submission.", (uint64_t)(fence));
5144        }
5145        if (!dev_data->fenceMap[fence].needsSignaled) {
5146            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5147                                reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                "Fence %#" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
5149                                reinterpret_cast<uint64_t &>(fence));
5150        }
5151    }
5152    // TODO : Review these old print functions and clean up as appropriate
5153    print_mem_list(dev_data);
5154    printCBList(dev_data);
5155    // Update cmdBuffer-related data structs and mark fence in-use
5156    trackCommandBuffers(dev_data, queue, submitCount, pSubmits, fence);
5157    // Now verify each individual submit
5158    std::unordered_set<VkQueue> processed_other_queues;
5159    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5160        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5161        vector<VkSemaphore> semaphoreList;
5162        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5163            const VkSemaphore &semaphore = submit->pWaitSemaphores[i];
5164            semaphoreList.push_back(semaphore);
5165            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
5166                if (dev_data->semaphoreMap[semaphore].signaled) {
5167                    dev_data->semaphoreMap[semaphore].signaled = false;
5168                } else {
5169                    skipCall |=
5170                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5171                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5172                                "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
5173                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5174                }
5175                const VkQueue &other_queue = dev_data->semaphoreMap[semaphore].queue;
5176                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
5177                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
5178                    processed_other_queues.insert(other_queue);
5179                }
5180            }
5181        }
5182        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5183            const VkSemaphore &semaphore = submit->pSignalSemaphores[i];
5184            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
5185                semaphoreList.push_back(semaphore);
5186                if (dev_data->semaphoreMap[semaphore].signaled) {
5187                    skipCall |=
5188                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5189                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5190                                "Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
5191                                " that has already been signaled but not waited on by queue %#" PRIx64 ".",
5192                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5193                                reinterpret_cast<uint64_t &>(dev_data->semaphoreMap[semaphore].queue));
5194                } else {
5195                    dev_data->semaphoreMap[semaphore].signaled = true;
5196                    dev_data->semaphoreMap[semaphore].queue = queue;
5197                }
5198            }
5199        }
5200        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5201            skipCall |= ValidateCmdBufImageLayouts(submit->pCommandBuffers[i]);
5202            pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5203            if (pCBNode) {
5204                pCBNode->semaphores = semaphoreList;
5205                pCBNode->submitCount++; // increment submit count
5206                pCBNode->lastSubmittedFence = fence;
5207                pCBNode->lastSubmittedQueue = queue;
5208                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5209                // Call submit-time functions to validate/update state
5210                for (auto &function : pCBNode->validate_functions) {
5211                    skipCall |= function();
5212                }
5213                for (auto &function : pCBNode->eventUpdates) {
5214                    skipCall |= function(queue);
5215                }
5216            }
5217        }
5218    }
5219    markCommandBuffersInFlight(dev_data, queue, submitCount, pSubmits, fence);
5220    lock.unlock();
5221    if (!skipCall)
5222        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
5223
5224    return result;
5225}
5226
5227#if MTMERGESOURCE
5228VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5229                                                                const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5230    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5231    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
        print_mem_list(my_data);
    }
5236    return result;
5237}
5238
5239VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5240vkFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5241    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5242
5243    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5244    // Before freeing a memory object, an application must ensure the memory object is no longer
5245    // in use by the device—for example by command buffers queued for execution. The memory need
5246    // not yet be unbound from all images and buffers, but any further use of those images or
5247    // buffers (on host or device) for anything other than destroying those objects will result in
5248    // undefined behavior.
5249
5250    std::unique_lock<std::mutex> lock(global_lock);
5251    freeMemObjInfo(my_data, device, mem, false);
5252    print_mem_list(my_data);
5253    printCBList(my_data);
5254    lock.unlock();
5255    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
5256}
5257
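// Check a vkMapMemory request against the tracked allocation: warn on zero-size maps, error on
// mapping an already-mapped object, and error when offset/size overstep allocationSize.
// Illustrative app-side calls (not part of the layer):
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &pData);         // OK: maps whole allocation
//     vkMapMemory(device, mem, allocSize, VK_WHOLE_SIZE, 0, &pData); // error: offset >= allocationSize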
5258static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5259    bool skipCall = false;
5260
5261    if (size == 0) {
5262        // TODO: a size of 0 is not listed as an invalid use in the spec, should it be?
5263        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5264                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5265                           "VkMapMemory: Attempting to map memory range of size zero");
5266    }
5267
5268    auto mem_element = my_data->memObjMap.find(mem);
5269    if (mem_element != my_data->memObjMap.end()) {
5270        // It is an application error to call VkMapMemory on an object that is already mapped
5271        if (mem_element->second.memRange.size != 0) {
5272            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5273                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5274                               "VkMapMemory: Attempting to map memory on an already-mapped object %#" PRIxLEAST64, (uint64_t)mem);
5275        }
5276
5277        // Validate that offset + size is within object's allocationSize
5278        if (size == VK_WHOLE_SIZE) {
5279            if (offset >= mem_element->second.allocInfo.allocationSize) {
5280                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5281                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total memory size %" PRIu64, offset,
5283                                   mem_element->second.allocInfo.allocationSize, mem_element->second.allocInfo.allocationSize);
5284            }
5285        } else {
5286            if ((offset + size) > mem_element->second.allocInfo.allocationSize) {
5287                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5288                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                   "MEM", "Mapping Memory from %" PRIu64 " to %" PRIu64 " with total memory size %" PRIu64, offset,
5290                                   size + offset, mem_element->second.allocInfo.allocationSize);
5291            }
5292        }
5293    }
5294    return skipCall;
5295}
5296
5297static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5298    auto mem_element = my_data->memObjMap.find(mem);
5299    if (mem_element != my_data->memObjMap.end()) {
5300        MemRange new_range;
5301        new_range.offset = offset;
5302        new_range.size = size;
5303        mem_element->second.memRange = new_range;
5304    }
5305}
5306
5307static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5308    bool skipCall = false;
5309    auto mem_element = my_data->memObjMap.find(mem);
5310    if (mem_element != my_data->memObjMap.end()) {
5311        if (!mem_element->second.memRange.size) {
5312            // Valid Usage: memory must currently be mapped
5313            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5314                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5315                               "Unmapping Memory without memory being mapped: mem obj %#" PRIxLEAST64, (uint64_t)mem);
5316        }
5317        mem_element->second.memRange.size = 0;
5318        if (mem_element->second.pData) {
5319            free(mem_element->second.pData);
5320            mem_element->second.pData = 0;
5321        }
5322    }
5323    return skipCall;
5324}
5325
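// For mappings of non-coherent memory, initializeAndTrackMemory() below hands the app a pointer
// into the middle of a double-sized shadow allocation pre-filled with this byte pattern, leaving
// guard bands before and after the requested range so later consistency checks can spot writes
// that strayed outside the mapped region.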
5326static char NoncoherentMemoryFillValue = 0xb;
5327
5328static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
5329    auto mem_element = dev_data->memObjMap.find(mem);
5330    if (mem_element != dev_data->memObjMap.end()) {
5331        mem_element->second.pDriverData = *ppData;
5332        uint32_t index = mem_element->second.allocInfo.memoryTypeIndex;
5333        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5334            mem_element->second.pData = 0;
5335        } else {
5336            if (size == VK_WHOLE_SIZE) {
5337                size = mem_element->second.allocInfo.allocationSize;
5338            }
5339            size_t convSize = (size_t)(size);
5340            mem_element->second.pData = malloc(2 * convSize);
5341            memset(mem_element->second.pData, NoncoherentMemoryFillValue, 2 * convSize);
5342            *ppData = static_cast<char *>(mem_element->second.pData) + (convSize / 2);
5343        }
5344    }
5345}
5346#endif
5347// Verify that state for fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled and
5349//  it should have been submitted on a queue or during acquire next image
5350static inline bool verifyWaitFenceState(VkDevice device, VkFence fence, const char *apiCall) {
5351    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5352    bool skipCall = false;
5353    auto pFenceInfo = my_data->fenceMap.find(fence);
5354    if (pFenceInfo != my_data->fenceMap.end()) {
5355        if (!pFenceInfo->second.firstTimeFlag) {
5356            if (!pFenceInfo->second.needsSignaled) {
5357                skipCall |=
5358                    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5359                            (uint64_t)fence, __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5360                            "%s specified fence %#" PRIxLEAST64 " already in SIGNALED state.", apiCall, (uint64_t)fence);
5361            }
5362            if (!pFenceInfo->second.queue && !pFenceInfo->second.swapchain) { // Checking status of unsubmitted fence
5363                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5364                                    reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5365                                    "%s called for fence %#" PRIxLEAST64 " which has not been submitted on a Queue or during "
5366                                    "acquire next image.",
5367                                    apiCall, reinterpret_cast<uint64_t &>(fence));
5368            }
5369        } else {
5370            pFenceInfo->second.firstTimeFlag = false;
5371        }
5372    }
5373    return skipCall;
5374}
5375
5376VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5377vkWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5378    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5379    bool skip_call = false;
5380    // Verify fence status of submitted fences
5381    std::unique_lock<std::mutex> lock(global_lock);
5382    for (uint32_t i = 0; i < fenceCount; i++) {
5383        skip_call |= verifyWaitFenceState(device, pFences[i], "vkWaitForFences");
5384    }
5385    lock.unlock();
5386    if (skip_call)
5387        return VK_ERROR_VALIDATION_FAILED_EXT;
5388
5389    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5390
5391    if (result == VK_SUCCESS) {
5392        lock.lock();
5393        // When we know that all fences are complete we can clean/remove their CBs
5394        if (waitAll || fenceCount == 1) {
5395            skip_call |= decrementResources(dev_data, fenceCount, pFences);
5396        }
5397        // NOTE : Alternate case not handled here is when some fences have completed. In
5398        //  this case for app to guarantee which fences completed it will have to call
5399        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5400        lock.unlock();
5401    }
5402    if (skip_call)
5403        return VK_ERROR_VALIDATION_FAILED_EXT;
5404    return result;
5405}
5406
5407VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus(VkDevice device, VkFence fence) {
5408    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5409    bool skipCall = false;
5410    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5411    std::unique_lock<std::mutex> lock(global_lock);
5412    skipCall = verifyWaitFenceState(device, fence, "vkGetFenceStatus");
5413    lock.unlock();
5414
5415    if (skipCall)
5416        return result;
5417
5418    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
5420    lock.lock();
5421    if (result == VK_SUCCESS) {
5422        skipCall |= decrementResources(dev_data, 1, &fence);
5423    }
5424    lock.unlock();
    if (skipCall)
5426        return VK_ERROR_VALIDATION_FAILED_EXT;
5427    return result;
5428}
5429
5430VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
5431                                                            VkQueue *pQueue) {
5432    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5433    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
5434    std::lock_guard<std::mutex> lock(global_lock);
5435
5436    // Add queue to tracking set only if it is new
5437    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
5439        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
5440        pQNode->device = device;
5441    }
5442}
5443
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    // decrementResources touches shared CB/queue tracking state, so hold the global lock
    // here just as vkDeviceWaitIdle() does below.
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= decrementResources(dev_data, queue);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
    return result;
}
5453
5454VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle(VkDevice device) {
5455    bool skip_call = false;
5456    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5457    std::unique_lock<std::mutex> lock(global_lock);
5458    for (auto queue : dev_data->queues) {
5459        skip_call |= decrementResources(dev_data, queue);
5460    }
5461    dev_data->globalInFlightCmdBuffers.clear();
5462    lock.unlock();
5463    if (skip_call)
5464        return VK_ERROR_VALIDATION_FAILED_EXT;
5465    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
5466    return result;
5467}
5468
5469VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
5470    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5471    bool skipCall = false;
5472    std::unique_lock<std::mutex> lock(global_lock);
5473    auto fence_pair = dev_data->fenceMap.find(fence);
5474    if (fence_pair != dev_data->fenceMap.end()) {
5475        if (fence_pair->second.in_use.load()) {
5476            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5477                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5478                                "Fence %#" PRIx64 " is in use by a command buffer.", (uint64_t)(fence));
5479        }
5480        dev_data->fenceMap.erase(fence_pair);
5481    }
5482    lock.unlock();
5483
5484    if (!skipCall)
5485        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
5486}
5487
5488VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5489vkDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5490    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5491    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5492    std::lock_guard<std::mutex> lock(global_lock);
5493    auto item = dev_data->semaphoreMap.find(semaphore);
5494    if (item != dev_data->semaphoreMap.end()) {
5495        if (item->second.in_use.load()) {
5496            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5497                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
5498                    "Cannot delete semaphore %" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
5499        }
5500        dev_data->semaphoreMap.erase(semaphore);
5501    }
5502    // TODO : Clean up any internal data structures using this obj.
5503}
5504
5505VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5506    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5507    bool skip_call = false;
5508    std::unique_lock<std::mutex> lock(global_lock);
5509    auto event_data = dev_data->eventMap.find(event);
5510    if (event_data != dev_data->eventMap.end()) {
5511        if (event_data->second.in_use.load()) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                "Cannot delete event %" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
5516        }
5517        dev_data->eventMap.erase(event_data);
5518    }
5519    lock.unlock();
5520    if (!skip_call)
5521        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5522    // TODO : Clean up any internal data structures using this obj.
5523}
5524
5525VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5526vkDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5527    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5528        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5529    // TODO : Clean up any internal data structures using this obj.
5530}
5531
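// vkGetQueryPoolResults: each requested query falls into one of four states --
//   uninitialized (never written):  always an error,
//   available and in flight:        an error unless the in-flight reset was guarded by waited-on events,
//   unavailable and in flight:      an error unless WAIT/PARTIAL is requested and an update is pending,
//   unavailable (not in flight):    always an error --
// and DRAWSTATE_INVALID_QUERY is flagged accordingly before calling down the chain.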
5532VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5533                                                     uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5534                                                     VkQueryResultFlags flags) {
5535    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5536    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5537    GLOBAL_CB_NODE *pCB = nullptr;
5538    std::unique_lock<std::mutex> lock(global_lock);
5539    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5540        pCB = getCBNode(dev_data, cmdBuffer);
5541        for (auto queryStatePair : pCB->queryToStateMap) {
5542            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5543        }
5544    }
5545    bool skip_call = false;
5546    for (uint32_t i = 0; i < queryCount; ++i) {
5547        QueryObject query = {queryPool, firstQuery + i};
5548        auto queryElement = queriesInFlight.find(query);
5549        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        if (queryToStateElement == dev_data->queryToStateMap.end()) {
            // Uninitialized: no data has ever been collected for this query index
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool %" PRIu64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        } else {
            // Available and in flight
            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
5554                for (auto cmdBuffer : queryElement->second) {
5555                    pCB = getCBNode(dev_data, cmdBuffer);
5556                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5557                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5558                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5559                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5560                                             "Cannot get query results on queryPool %" PRIu64 " with index %d which is in flight.",
5561                                             (uint64_t)(queryPool), firstQuery + i);
5562                    } else {
5563                        for (auto event : queryEventElement->second) {
5564                            dev_data->eventMap[event].needsSignaled = true;
5565                        }
5566                    }
5567                }
5568                // Unavailable and in flight
            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
5571                // TODO : Can there be the same query in use by multiple command buffers in flight?
5572                bool make_available = false;
5573                for (auto cmdBuffer : queryElement->second) {
5574                    pCB = getCBNode(dev_data, cmdBuffer);
5575                    make_available |= pCB->queryToStateMap[query];
5576                }
5577                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5578                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5579                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5580                                         "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
5581                                         (uint64_t)(queryPool), firstQuery + i);
5582                }
                // Unavailable
            } else if (!queryToStateElement->second) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool %" PRIu64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
5597        }
5598    }
5599    lock.unlock();
5600    if (skip_call)
5601        return VK_ERROR_VALIDATION_FAILED_EXT;
5602    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5603                                                                flags);
5604}
5605
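// Verify that a buffer is known to the layer and not referenced by any in-flight command buffer;
// flags DRAWSTATE_DOUBLE_DESTROY or DRAWSTATE_OBJECT_INUSE respectively.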
5606static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5607    bool skip_call = false;
5608    auto buffer_data = my_data->bufferMap.find(buffer);
5609    if (buffer_data == my_data->bufferMap.end()) {
5610        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5611                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5612                             "Cannot free buffer %" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5613    } else {
5614        if (buffer_data->second.in_use.load()) {
5615            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5616                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5617                                 "Cannot free buffer %" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5618        }
5619    }
5620    return skip_call;
5621}
5622
5623static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5624                                     VkDebugReportObjectTypeEXT object_type) {
5625    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5626        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5627                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer %" PRIx64 " is aliased with image %" PRIx64, object_handle,
5628                       other_handle);
5629    } else {
5630        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5631                       MEMTRACK_INVALID_ALIASING, "MEM", "Image %" PRIx64 " is aliased with buffer %" PRIx64, object_handle,
5632                       other_handle);
5633    }
5634}
5635
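// Compare a newly bound range against the existing ranges on the same VkDeviceMemory. Both ends
// are first masked down to bufferImageGranularity boundaries, so a buffer and an image that merely
// share a granularity "page" are reported as aliased even without byte-level overlap.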
5636static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5637                                  VkDebugReportObjectTypeEXT object_type) {
5638    bool skip_call = false;
5639
5640    for (auto range : ranges) {
5641        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
5642            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5643            continue;
5644        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
5645            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5646            continue;
5647        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5648    }
5649    return skip_call;
5650}
5651
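// Record the inclusive [memoryOffset, memoryOffset + size - 1] range that this buffer or image
// now occupies within its backing allocation, for later aliasing checks.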
5652static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5653                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
5654    MEMORY_RANGE range;
5655    range.handle = handle;
5656    range.memory = mem;
5657    range.start = memoryOffset;
5658    range.end = memoryOffset + memRequirements.size - 1;
5659    ranges.push_back(range);
5660    return range;
5661}
5662
5663static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
5664    for (uint32_t item = 0; item < ranges.size(); item++) {
5665        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
5666            ranges.erase(ranges.begin() + item);
5667            break;
5668        }
5669    }
5670}
5671
5672VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer(VkDevice device, VkBuffer buffer,
5673                                                           const VkAllocationCallbacks *pAllocator) {
5674    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
5678        lock.unlock();
5679        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5680        lock.lock();
5681    }
5682    // Clean up memory binding and range information for buffer
5683    const auto &bufferEntry = dev_data->bufferMap.find(buffer);
5684    if (bufferEntry != dev_data->bufferMap.end()) {
5685        const auto &memEntry = dev_data->memObjMap.find(bufferEntry->second.mem);
5686        if (memEntry != dev_data->memObjMap.end()) {
5687            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), bufferEntry->second.mem, memEntry->second.bufferRanges);
5688        }
5689        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5690        dev_data->bufferMap.erase(bufferEntry);
5691    }
5692}
5693
5694VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5695vkDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5696    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5697    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5698    std::lock_guard<std::mutex> lock(global_lock);
5699    auto item = dev_data->bufferViewMap.find(bufferView);
5700    if (item != dev_data->bufferViewMap.end()) {
5701        dev_data->bufferViewMap.erase(item);
5702    }
5703}
5704
5705VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5706    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5711
5712    std::lock_guard<std::mutex> lock(global_lock);
5713    const auto &imageEntry = dev_data->imageMap.find(image);
5714    if (imageEntry != dev_data->imageMap.end()) {
5715        // Clean up memory mapping, bindings and range references for image
5716        auto memEntry = dev_data->memObjMap.find(imageEntry->second.mem);
5717        if (memEntry != dev_data->memObjMap.end()) {
5718            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.mem, memEntry->second.imageRanges);
5719            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5720            memEntry->second.image = VK_NULL_HANDLE;
5721        }
5722        // Remove image from imageMap
5723        dev_data->imageMap.erase(imageEntry);
5724    }
5725    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5726    if (subEntry != dev_data->imageSubresourceMap.end()) {
5727        for (const auto& pair : subEntry->second) {
5728            dev_data->imageLayoutMap.erase(pair);
5729        }
5730        dev_data->imageSubresourceMap.erase(subEntry);
5731    }
5732}
5733
5734#if MTMERGESOURCE
5735VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
5736vkBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5737    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5738    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5739    std::unique_lock<std::mutex> lock(global_lock);
5740    // Track objects tied to memory
5741    uint64_t buffer_handle = (uint64_t)(buffer);
5742    bool skipCall =
5743        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5744    auto buffer_node = dev_data->bufferMap.find(buffer);
5745    if (buffer_node != dev_data->bufferMap.end()) {
5746        buffer_node->second.mem = mem;
5747        VkMemoryRequirements memRequirements;
5748        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5749
5750        // Track and validate bound memory range information
5751        const auto &memEntry = dev_data->memObjMap.find(mem);
5752        if (memEntry != dev_data->memObjMap.end()) {
5753            const MEMORY_RANGE range =
5754                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, memEntry->second.bufferRanges);
5755            skipCall |=
5756                validate_memory_range(dev_data, memEntry->second.imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5757        }
5758
5759        // Validate memory requirements alignment
5760        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5761            skipCall |=
5762                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5763                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5764                        "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be an integer multiple of the "
5765                        "VkMemoryRequirements::alignment value %#" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with this buffer",
5767                        memoryOffset, memRequirements.alignment);
5768        }
5769        // Validate device limits alignments
5770        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage;
5771        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
5772            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
5773                skipCall |=
5774                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5775                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5776                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
5777                            "device limit minTexelBufferOffsetAlignment %#" PRIxLEAST64,
5778                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
5779            }
5780        }
5781        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
5782            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
5783                0) {
5784                skipCall |=
5785                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5786                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
5787                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
5788                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
5789                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
5790            }
5791        }
5792        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
5793            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
5794                0) {
5795                skipCall |=
5796                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5797                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
5798                            "vkBindBufferMemory(): memoryOffset is %#" PRIxLEAST64 " but must be a multiple of "
5799                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
5800                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
5801            }
5802        }
5803    }
5804    print_mem_list(dev_data);
5805    lock.unlock();
5806    if (!skipCall) {
5807        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5808    }
5809    return result;
5810}
5811
5812VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5813vkGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5814    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5815    // TODO : What to track here?
5816    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5817    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5818}
5819
5820VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5821vkGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5822    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5823    // TODO : What to track here?
5824    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5825    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5826}
5827#endif
5828VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5829vkDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5830    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5831        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5832    // TODO : Clean up any internal data structures using this obj.
5833}
5834
5835VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5836vkDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5837    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5838
5839    std::unique_lock<std::mutex> lock(global_lock);
5840    my_data->shaderModuleMap.erase(shaderModule);
5841    lock.unlock();
5842
5843    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5844}
5845
5846VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5847vkDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5848    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5849    // TODO : Clean up any internal data structures using this obj.
5850}
5851
5852VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5853vkDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5854    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5855        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5856    // TODO : Clean up any internal data structures using this obj.
5857}
5858
5859VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5860vkDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5861    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5862    // TODO : Clean up any internal data structures using this obj.
5863}
5864
5865VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5866vkDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5867    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5868        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5869    // TODO : Clean up any internal data structures using this obj.
5870}
5871
5872VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5873vkDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5874    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5875        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5876    // TODO : Clean up any internal data structures using this obj.
5877}
5878// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5879//  If this is a secondary command buffer, then make sure its primary is also in-flight
5880//  If primary is not in-flight, then remove secondary from global in-flight set
5881// This function is only valid at a point when cmdBuffer is being reset or freed
5882static bool checkAndClearCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5883    bool skip_call = false;
5884    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5885        // Primary CB or secondary where primary is also in-flight is an error
5886        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5887            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5888            skip_call |= log_msg(
5889                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5890                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5891                "Attempt to %s command buffer (%#" PRIxLEAST64 ") which is in use.", action,
5892                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5893        } else { // Secondary CB w/o primary in-flight, remove from in-flight
5894            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5895        }
5896    }
5897    return skip_call;
5898}
5899// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5900static bool checkAndClearCommandBuffersInFlight(layer_data *dev_data, const VkCommandPool commandPool, const char *action) {
5901    bool skip_call = false;
5902    auto pool_data = dev_data->commandPoolMap.find(commandPool);
5903    if (pool_data != dev_data->commandPoolMap.end()) {
5904        for (auto cmd_buffer : pool_data->second.commandBuffers) {
5905            if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5906                skip_call |= checkAndClearCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5907            }
5908        }
5909    }
5910    return skip_call;
5911}
5912
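// Freeing command buffers: verify each is not in flight, reset it to release its internal
// references, then drop it from both commandBufferMap and its pool's buffer list.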
5913VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5914vkFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5915    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5916
5917    bool skip_call = false;
5918    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_pair = dev_data->commandBufferMap.find(pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_pair != dev_data->commandBufferMap.end()) {
            skip_call |= checkAndClearCommandBufferInFlight(dev_data, cb_pair->second, "free");
            // reset prior to delete for data clean-up
            resetCB(dev_data, (*cb_pair).second->commandBuffer);
            delete (*cb_pair).second;
            dev_data->commandBufferMap.erase(cb_pair);
        }
5929
5930        // Remove commandBuffer reference from commandPoolMap
5931        dev_data->commandPoolMap[commandPool].commandBuffers.remove(pCommandBuffers[i]);
5932    }
5933#if MTMERGESOURCE
5934    printCBList(dev_data);
5935#endif
5936    lock.unlock();
5937
5938    if (!skip_call)
5939        dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5940}
5941
5942VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5943                                                                   const VkAllocationCallbacks *pAllocator,
5944                                                                   VkCommandPool *pCommandPool) {
5945    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5946
5947    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5948
5949    if (VK_SUCCESS == result) {
5950        std::lock_guard<std::mutex> lock(global_lock);
5951        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5952        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5953    }
5954    return result;
5955}
5956
5957VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5958                                                                 const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5960    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5961    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5962    if (result == VK_SUCCESS) {
5963        std::lock_guard<std::mutex> lock(global_lock);
5964        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5965    }
5966    return result;
5967}
5968
5969// Destroy commandPool along with all of the commandBuffers allocated from that pool
5970VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
5971vkDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5972    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Verify that command buffers in pool are complete (not in-flight)
    bool skipCall = checkAndClearCommandBuffersInFlight(dev_data, commandPool, "destroy command pool with");
5977    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandPoolMap
5978    if (dev_data->commandPoolMap.find(commandPool) != dev_data->commandPoolMap.end()) {
5979        for (auto poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
5980             poolCb != dev_data->commandPoolMap[commandPool].commandBuffers.end();) {
5981            clear_cmd_buf_and_mem_references(dev_data, *poolCb);
5982            auto del_cb = dev_data->commandBufferMap.find(*poolCb);
5983            delete (*del_cb).second;                  // delete CB info structure
5984            dev_data->commandBufferMap.erase(del_cb); // Remove this command buffer
5985            poolCb = dev_data->commandPoolMap[commandPool].commandBuffers.erase(
5986                poolCb); // Remove CB reference from commandPoolMap's list
5987        }
5988    }
5989    dev_data->commandPoolMap.erase(commandPool);
5990
    lock.unlock();

    if (!skipCall)
        dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5998}
5999
6000VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6001vkResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6002    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    if (checkAndClearCommandBuffersInFlight(dev_data, commandPool, "reset command pool with"))
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
6011
6012    // Reset all of the CBs allocated from this pool
6013    if (VK_SUCCESS == result) {
6014        std::lock_guard<std::mutex> lock(global_lock);
6015        auto it = dev_data->commandPoolMap[commandPool].commandBuffers.begin();
6016        while (it != dev_data->commandPoolMap[commandPool].commandBuffers.end()) {
6017            resetCB(dev_data, (*it));
6018            ++it;
6019        }
6020    }
6021    return result;
6022}
6023
6024VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6025    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6026    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6027    bool skipCall = false;
6028    std::unique_lock<std::mutex> lock(global_lock);
6029    for (uint32_t i = 0; i < fenceCount; ++i) {
6030        auto fence_item = dev_data->fenceMap.find(pFences[i]);
6031        if (fence_item != dev_data->fenceMap.end()) {
6032            fence_item->second.needsSignaled = true;
6033            if (fence_item->second.in_use.load()) {
6034                skipCall |=
6035                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6036                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6037                            "Fence %#" PRIx64 " is in use by a command buffer.", reinterpret_cast<const uint64_t &>(pFences[i]));
6038            }
6039        }
6040    }
6041    lock.unlock();
6042    if (!skipCall)
6043        result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6044    return result;
6045}
6046
6047VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6048vkDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6049    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6050    std::unique_lock<std::mutex> lock(global_lock);
6051    auto fbNode = dev_data->frameBufferMap.find(framebuffer);
6052    if (fbNode != dev_data->frameBufferMap.end()) {
6053        for (auto cb : fbNode->second.referencingCmdBuffers) {
6054            auto cbNode = dev_data->commandBufferMap.find(cb);
6055            if (cbNode != dev_data->commandBufferMap.end()) {
6056                // Set CB as invalid and record destroyed framebuffer
6057                cbNode->second->state = CB_INVALID;
6058                cbNode->second->destroyedFramebuffers.insert(framebuffer);
6059            }
6060        }
6061        delete [] fbNode->second.createInfo.pAttachments;
6062        dev_data->frameBufferMap.erase(fbNode);
6063    }
6064    lock.unlock();
6065    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6066}
6067
6068VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6069vkDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6070    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6071    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6072    std::lock_guard<std::mutex> lock(global_lock);
6073    dev_data->renderPassMap.erase(renderPass);
6074}
6075
6076VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6077                                                              const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6078    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6079
6080    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6081
6082    if (VK_SUCCESS == result) {
6083        std::lock_guard<std::mutex> lock(global_lock);
6084        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6085        dev_data->bufferMap[*pBuffer].createInfo = *pCreateInfo;
6086        dev_data->bufferMap[*pBuffer].in_use.store(0);
6087    }
6088    return result;
6089}
6090
6091VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6092                                                                  const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6093    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6094    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6095    if (VK_SUCCESS == result) {
6096        std::lock_guard<std::mutex> lock(global_lock);
6097        dev_data->bufferViewMap[*pView] = VkBufferViewCreateInfo(*pCreateInfo);
6098#if MTMERGESOURCE
6099        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6100        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6101        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
6102                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
6103                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6104#endif
6105    }
6106    return result;
6107}
6108
6109VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6110                                                             const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6111    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6112
6113    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6114
6115    if (VK_SUCCESS == result) {
6116        std::lock_guard<std::mutex> lock(global_lock);
6117        IMAGE_LAYOUT_NODE image_node;
6118        image_node.layout = pCreateInfo->initialLayout;
6119        image_node.format = pCreateInfo->format;
6120        dev_data->imageMap[*pImage].createInfo = *pCreateInfo;
6121        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6122        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6123        dev_data->imageLayoutMap[subpair] = image_node;
6124    }
6125    return result;
6126}
6127
6128static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6129    /* expects global_lock to be held by caller */
6130
6131    auto image_node_it = dev_data->imageMap.find(image);
6132    if (image_node_it != dev_data->imageMap.end()) {
6133        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6134         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6135         * the actual values.
6136         */
6137        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6138            range->levelCount = image_node_it->second.createInfo.mipLevels - range->baseMipLevel;
6139        }
6140
6141        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6142            range->layerCount = image_node_it->second.createInfo.arrayLayers - range->baseArrayLayer;
6143        }
6144    }
6145}
6146
6147// Return the correct layer/level counts if the caller used the special
6148// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6149static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6150                                         VkImage image) {
6151    /* expects global_lock to be held by caller */
6152
6153    *levels = range.levelCount;
6154    *layers = range.layerCount;
6155    auto image_node_it = dev_data->imageMap.find(image);
6156    if (image_node_it != dev_data->imageMap.end()) {
6157        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6158            *levels = image_node_it->second.createInfo.mipLevels - range.baseMipLevel;
6159        }
6160        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6161            *layers = image_node_it->second.createInfo.arrayLayers - range.baseArrayLayer;
6162        }
6163    }
6164}
6165
6166VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6167                                                                 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6168    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6169    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6170    if (VK_SUCCESS == result) {
6171        std::lock_guard<std::mutex> lock(global_lock);
6172        VkImageViewCreateInfo localCI = VkImageViewCreateInfo(*pCreateInfo);
6173        ResolveRemainingLevelsLayers(dev_data, &localCI.subresourceRange, pCreateInfo->image);
6174        dev_data->imageViewMap[*pView] = localCI;
6175#if MTMERGESOURCE
6176        // Validate that img has correct usage flags set
6177        validate_image_usage_flags(dev_data, pCreateInfo->image,
6178                                   VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
6179                                       VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6180                                   false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT]_BIT");
6181#endif
6182    }
6183    return result;
6184}
6185
6186VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6187vkCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6188    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6189    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6190    if (VK_SUCCESS == result) {
6191        std::lock_guard<std::mutex> lock(global_lock);
6192        auto &fence_node = dev_data->fenceMap[*pFence];
6193        fence_node.createInfo = *pCreateInfo;
6194        fence_node.needsSignaled = true;
6195        if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
6196            fence_node.firstTimeFlag = true;
6197            fence_node.needsSignaled = false;
6198        }
6199        fence_node.in_use.store(0);
6200    }
6201    return result;
6202}
6203
6204// TODO handle pipeline caches
6205VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6206                                                     const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6207    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6208    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6209    return result;
6210}
6211
6212VKAPI_ATTR void VKAPI_CALL
6213vkDestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6214    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6215    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6216}
6217
6218VKAPI_ATTR VkResult VKAPI_CALL
6219vkGetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6221    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6222    return result;
6223}
6224
6225VKAPI_ATTR VkResult VKAPI_CALL
6226vkMergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6227    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6228    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6229    return result;
6230}
6231
6232// utility function to set collective state for pipeline
6233void set_pipeline_state(PIPELINE_NODE *pPipe) {
6234    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6235    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6236        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6237            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6238                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6239                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6240                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6241                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6242                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6243                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6244                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6245                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6246                    pPipe->blendConstantsEnabled = true;
6247                }
6248            }
6249        }
6250    }
6251}
6252
6253VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6254vkCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6255                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6256                          VkPipeline *pPipelines) {
6257    VkResult result = VK_SUCCESS;
6258    // TODO What to do with pipelineCache?
6259    // The order of operations here is a little convoluted but gets the job done
6260    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6261    //  2. Create state is then validated (which uses flags set up during shadowing)
6262    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6263    bool skipCall = false;
6264    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6265    vector<PIPELINE_NODE *> pPipeNode(count);
6266    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6267
6268    uint32_t i = 0;
6269    std::unique_lock<std::mutex> lock(global_lock);
6270
6271    for (i = 0; i < count; i++) {
6272        pPipeNode[i] = new PIPELINE_NODE;
6273        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
6274        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6275    }
6276
6277    if (!skipCall) {
6278        lock.unlock();
6279        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6280                                                                          pPipelines);
6281        lock.lock();
6282        for (i = 0; i < count; i++) {
6283            pPipeNode[i]->pipeline = pPipelines[i];
6284            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6285        }
6286        lock.unlock();
6287    } else {
6288        for (i = 0; i < count; i++) {
6289            delete pPipeNode[i];
6290        }
6291        lock.unlock();
6292        return VK_ERROR_VALIDATION_FAILED_EXT;
6293    }
6294    return result;
6295}
6296
6297VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6298vkCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6299                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6300                         VkPipeline *pPipelines) {
6301    VkResult result = VK_SUCCESS;
6302    bool skipCall = false;
6303
6304    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6305    vector<PIPELINE_NODE *> pPipeNode(count);
6306    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6307
6308    uint32_t i = 0;
6309    std::unique_lock<std::mutex> lock(global_lock);
6310    for (i = 0; i < count; i++) {
6311        // TODO: Verify compute stage bits
6312
6313        // Create and initialize internal tracking data structure
6314        pPipeNode[i] = new PIPELINE_NODE;
6315        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
6316        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6317
6318        // TODO: Add Compute Pipeline Verification
6319        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6320    }
6321
6322    if (!skipCall) {
6323        lock.unlock();
6324        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
6325                                                                         pPipelines);
6326        lock.lock();
6327        for (i = 0; i < count; i++) {
6328            pPipeNode[i]->pipeline = pPipelines[i];
6329            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6330        }
6331        lock.unlock();
6332    } else {
6333        for (i = 0; i < count; i++) {
6334            // Clean up any locally allocated data structures
6335            delete pPipeNode[i];
6336        }
6337        lock.unlock();
6338        return VK_ERROR_VALIDATION_FAILED_EXT;
6339    }
6340    return result;
6341}
6342
6343VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6344                                                               const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6345    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6346    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6347    if (VK_SUCCESS == result) {
6348        std::lock_guard<std::mutex> lock(global_lock);
6349        dev_data->sampleMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6350    }
6351    return result;
6352}
6353
6354VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6355vkCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6356                            const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6357    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6358    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6359    if (VK_SUCCESS == result) {
6360        // TODOSC : Capture layout bindings set
6361        std::lock_guard<std::mutex> lock(global_lock);
6362        dev_data->descriptorSetLayoutMap[*pSetLayout] = new DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
6363    }
6364    return result;
6365}
6366
6367static bool validatePushConstantSize(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6368                                     const char *caller_name) {
6369    bool skipCall = false;
6370    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
6371        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6372                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6373                           DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u whose sum "
6374                                                                 "exceeds this device's maxPushConstantsSize of %u.",
6375    }
6376    return skipCall;
6377}
6378
6379VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6380                                                      const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6381    bool skipCall = false;
6382    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6383    uint32_t i = 0;
6384    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6385        skipCall |= validatePushConstantSize(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6386                                             pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()");
6387        if ((pCreateInfo->pPushConstantRanges[i].size == 0) || ((pCreateInfo->pPushConstantRanges[i].size & 0x3) != 0)) {
6388            skipCall |=
6389                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6390                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constant index %u with "
6391                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6392                        i, pCreateInfo->pPushConstantRanges[i].size);
6393        }
6394        // TODO : Add warning if ranges overlap
6395    }
6396    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6397    if (VK_SUCCESS == result) {
6398        std::lock_guard<std::mutex> lock(global_lock);
6399        // TODOSC : Merge capture of the setLayouts per pipeline
6400        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6401        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6402        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6403            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6404        }
6405        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6406        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6407            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6408        }
6409    }
6410    return result;
6411}
6412
6413VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6414vkCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6415                       VkDescriptorPool *pDescriptorPool) {
6416    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6417    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6418    if (VK_SUCCESS == result) {
6419        // Insert this pool into Global Pool LL at head
6420        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6421                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool %#" PRIxLEAST64,
6422                    (uint64_t)*pDescriptorPool))
6423            return VK_ERROR_VALIDATION_FAILED_EXT;
6424        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6425        if (NULL == pNewNode) {
6426            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6427                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6428                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6429                return VK_ERROR_VALIDATION_FAILED_EXT;
6430        } else {
6431            std::lock_guard<std::mutex> lock(global_lock);
6432            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6433        }
6434    } else {
6435        // Need to do anything if pool create fails?
6436    }
6437    return result;
6438}
6439
6440VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6441vkResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6442    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6443    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6444    if (VK_SUCCESS == result) {
6445        std::lock_guard<std::mutex> lock(global_lock);
6446        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6447    }
6448    return result;
6449}
6450
6451VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6452vkAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6453    bool skipCall = false;
6454    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6455
6456    std::unique_lock<std::mutex> lock(global_lock);
6457    // Verify that requested descriptorSets are available in pool
6458    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6459    if (!pPoolNode) {
6460        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6461                            (uint64_t)pAllocateInfo->descriptorPool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
6462                            "Unable to find pool node for pool %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
6463                            (uint64_t)pAllocateInfo->descriptorPool);
6464    } else { // Make sure pool has all the available descriptors before calling down chain
6465        skipCall |= validate_descriptor_availability_in_pool(dev_data, pPoolNode, pAllocateInfo->descriptorSetCount,
6466                                                             pAllocateInfo->pSetLayouts);
6467    }
6468    lock.unlock();
6469    if (skipCall)
6470        return VK_ERROR_VALIDATION_FAILED_EXT;
6471    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6472    if (VK_SUCCESS == result) {
6473        lock.lock();
6474        DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, pAllocateInfo->descriptorPool);
6475        if (pPoolNode) {
6476            if (pAllocateInfo->descriptorSetCount == 0) {
6477                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6478                        pAllocateInfo->descriptorSetCount, __LINE__, DRAWSTATE_NONE, "DS",
6479                        "AllocateDescriptorSets called with 0 count");
6480            }
6481            for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
6482                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6483                        (uint64_t)pDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Set %#" PRIxLEAST64,
6484                        (uint64_t)pDescriptorSets[i]);
6485                // Create new set node and add to head of pool nodes
6486                SET_NODE *pNewNode = new SET_NODE;
6487                if (NULL == pNewNode) {
6488                    if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6489                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6490                                DRAWSTATE_OUT_OF_MEMORY, "DS",
6491                                "Out of memory while attempting to allocate SET_NODE in vkAllocateDescriptorSets()")) {
6492                        lock.unlock();
6493                        return VK_ERROR_VALIDATION_FAILED_EXT;
6494                    }
6495                } else {
6496                    // TODO : Pool should store a total count of each type of Descriptor available
6497                    //  When descriptors are allocated, decrement the count and validate here
6498                    //  that the count doesn't go below 0. On reset/free, the count needs to be bumped back up.
6499                    // Insert set at head of Set LL for this pool
6500                    pNewNode->pNext = pPoolNode->pSets;
6501                    pNewNode->in_use.store(0);
6502                    pPoolNode->pSets = pNewNode;
6503                    auto layout_pair = dev_data->descriptorSetLayoutMap.find(pAllocateInfo->pSetLayouts[i]);
6504                    if (layout_pair == dev_data->descriptorSetLayoutMap.end()) {
6505                        // Bail out unconditionally here (even if the message is filtered by the log
6506                        // settings): the code below dereferences layout_pair, which is invalid here
6507                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6508                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, (uint64_t)pAllocateInfo->pSetLayouts[i],
6509                                __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
6510                                "Unable to find set layout node for layout %#" PRIxLEAST64 " specified in vkAllocateDescriptorSets() call",
6511                                (uint64_t)pAllocateInfo->pSetLayouts[i]);
6512                        lock.unlock();
6513                        return VK_ERROR_VALIDATION_FAILED_EXT;
6514                    }
6515                    pNewNode->p_layout = layout_pair->second;
6516                    pNewNode->pool = pAllocateInfo->descriptorPool;
6517                    pNewNode->set = pDescriptorSets[i];
6518                    pNewNode->descriptorCount = layout_pair->second->GetTotalDescriptorCount();
6519                    if (pNewNode->descriptorCount) {
6520                        pNewNode->pDescriptorUpdates.resize(pNewNode->descriptorCount);
6521                    }
6522                    dev_data->setMap[pDescriptorSets[i]] = pNewNode;
6523                }
6524            }
6525        }
6526        lock.unlock();
6527    }
6528    return result;
6529}
6530
6531VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6532vkFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6533    bool skipCall = false;
6534    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6535    // Make sure that no sets being destroyed are in-flight
6536    std::unique_lock<std::mutex> lock(global_lock);
6537    for (uint32_t i = 0; i < count; ++i)
6538        skipCall |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
6539    DESCRIPTOR_POOL_NODE *pPoolNode = getPoolNode(dev_data, descriptorPool);
6540    if (pPoolNode && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pPoolNode->createInfo.flags)) {
6541        // Can't Free from a NON_FREE pool
6542        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
6543                            (uint64_t)device, __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6544                            "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6545                            "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6546    }
6547    lock.unlock();
6548    if (skipCall)
6549        return VK_ERROR_VALIDATION_FAILED_EXT;
6550    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6551    if ((VK_SUCCESS == result) && (NULL != pPoolNode)) { // pPoolNode may be null if the pool isn't tracked
6552        lock.lock();
6553
6554        // Update available descriptor sets in pool
6555        pPoolNode->availableSets += count;
6556
6557        // For each freed descriptor add it back into the pool as available
6558        for (uint32_t i = 0; i < count; ++i) {
6559            SET_NODE *pSet = dev_data->setMap[pDescriptorSets[i]]; // getSetNode() without locking
6560            invalidateBoundCmdBuffers(dev_data, pSet);
6561            auto p_layout = pSet->p_layout;
6562            uint32_t typeIndex = 0, poolSizeCount = 0;
6563            for (uint32_t j = 0; j < p_layout->GetBindingCount(); ++j) {
6564                auto layout_binding = p_layout->GetDescriptorSetLayoutBindingPtrFromIndex(j);
6565                typeIndex = static_cast<uint32_t>(layout_binding->descriptorType);
6566                poolSizeCount = layout_binding->descriptorCount;
6567                pPoolNode->availableDescriptorTypeCount[typeIndex] += poolSizeCount;
6568            }
6569        }
6570        lock.unlock();
6571    }
6572    // TODO : Any other clean-up or book-keeping to do here?
6573    return result;
6574}
6575
6576VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6577vkUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6578                       uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6579    // dsUpdate will return true only if a bailout error occurs, so we only call down the chain when the update returns false
6580    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6581    std::unique_lock<std::mutex> lock(global_lock);
6582    bool rtn = dsUpdate(dev_data, device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
6583    lock.unlock();
6584    if (!rtn) {
6585        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6586                                                              pDescriptorCopies);
6587    }
6588}
6589
6590VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6591vkAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6592    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6593    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6594    if (VK_SUCCESS == result) {
6595        std::unique_lock<std::mutex> lock(global_lock);
6596        auto const &cp_it = dev_data->commandPoolMap.find(pCreateInfo->commandPool);
6597        if (cp_it != dev_data->commandPoolMap.end()) {
6598            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6599                // Add command buffer to its commandPool map
6600                cp_it->second.commandBuffers.push_back(pCommandBuffer[i]);
6601                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6602                // Add command buffer to map
6603                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6604                resetCB(dev_data, pCommandBuffer[i]);
6605                pCB->createInfo = *pCreateInfo;
6606                pCB->device = device;
6607            }
6608        }
6609#if MTMERGESOURCE
6610        printCBList(dev_data);
6611#endif
6612        lock.unlock();
6613    }
6614    return result;
6615}
6616
6617VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6618vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6619    bool skipCall = false;
6620    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6621    std::unique_lock<std::mutex> lock(global_lock);
6622    // Validate command buffer level
6623    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6624    if (pCB) {
6625        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
6626        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6627            skipCall |=
6628                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6629                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6630                        "Calling vkBeginCommandBuffer() on active CB %p before it has completed. "
6631                        "You must check CB fence before this call.",
6632                        commandBuffer);
6633        }
6634        clear_cmd_buf_and_mem_references(dev_data, pCB);
6635        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6636            // Secondary Command Buffer
6637            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6638            if (!pInfo) {
6639                skipCall |=
6640                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6641                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6642                            "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must have inheritance info.",
6643                            reinterpret_cast<void *>(commandBuffer));
6644            } else {
6645                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6646                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6647                        skipCall |= log_msg(
6648                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6649                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6650                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must specify a valid renderpass parameter.",
6651                            reinterpret_cast<void *>(commandBuffer));
6652                    }
6653                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6654                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6655                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6656                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6657                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (%p) may perform better if a "
6658                                                  "valid framebuffer parameter is specified.",
6659                                            reinterpret_cast<void *>(commandBuffer));
6660                    } else {
6661                        string errorString = "";
6662                        auto fbNode = dev_data->frameBufferMap.find(pInfo->framebuffer);
6663                        if (fbNode != dev_data->frameBufferMap.end()) {
6664                            VkRenderPass fbRP = fbNode->second.createInfo.renderPass;
6665                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6666                                // renderPass that framebuffer was created with must be compatible with local renderPass
6667                                skipCall |=
6668                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6669                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6670                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6671                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6672                                                  "Buffer (%p) renderPass (%#" PRIxLEAST64 ") is incompatible w/ framebuffer "
6673                                                  "(%#" PRIxLEAST64 ") w/ render pass (%#" PRIxLEAST64 ") due to: %s",
6674                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6675                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6676                            }
6677                            // Connect this framebuffer to this cmdBuffer
6678                            fbNode->second.referencingCmdBuffers.insert(pCB->commandBuffer);
6679                        }
6680                    }
6681                }
6682                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6683                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6684                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6685                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6686                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6687                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6688                                        "vkBeginCommandBuffer(): Secondary Command Buffer (%p) must not have "
6689                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
6690                                        "support precise occlusion queries.",
6691                                        reinterpret_cast<void *>(commandBuffer));
6692                }
6693            }
6694            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6695                auto rp_data = dev_data->renderPassMap.find(pInfo->renderPass);
6696                if (rp_data != dev_data->renderPassMap.end() && rp_data->second && rp_data->second->pCreateInfo) {
6697                    if (pInfo->subpass >= rp_data->second->pCreateInfo->subpassCount) {
6698                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6699                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6700                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6701                                            "vkBeginCommandBuffer(): Secondary Command Buffers (%p) must have a subpass index (%d) "
6702                                            "that is less than the number of subpasses (%d).",
6703                                            (void *)commandBuffer, pInfo->subpass, rp_data->second->pCreateInfo->subpassCount);
6704                    }
6705                }
6706            }
6707        }
6708        if (CB_RECORDING == pCB->state) {
6709            skipCall |=
6710                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6711                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6712                        "vkBeginCommandBuffer(): Cannot call Begin on CB (%#" PRIxLEAST64
6713                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6714                        (uint64_t)commandBuffer);
6715        } else if (CB_RECORDED == pCB->state) {
6716            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6717            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6718                skipCall |=
6719                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6720                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6721                            "Call to vkBeginCommandBuffer() on command buffer (%#" PRIxLEAST64
6722                            ") attempts to implicitly reset cmdBuffer created from command pool (%#" PRIxLEAST64
6723                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6724                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6725            }
6726            resetCB(dev_data, commandBuffer);
6727        }
6728        // Set updated state here in case implicit reset occurs above
6729        pCB->state = CB_RECORDING;
6730        pCB->beginInfo = *pBeginInfo;
6731        if (pCB->beginInfo.pInheritanceInfo) {
6732            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6733            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6734        }
6735    } else {
6736        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6737                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6738                            "In vkBeginCommandBuffer(): unable to find CommandBuffer node for CB %p!", (void *)commandBuffer);
6739    }
6740    lock.unlock();
6741    if (skipCall) {
6742        return VK_ERROR_VALIDATION_FAILED_EXT;
6743    }
6744    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6745
6746    return result;
6747}
6748
6749VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
6750    bool skipCall = false;
6751    VkResult result = VK_SUCCESS;
6752    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6753    std::unique_lock<std::mutex> lock(global_lock);
6754    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6755    if (pCB) {
6756        if (pCB->state != CB_RECORDING) {
6757            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkEndCommandBuffer()");
6758        }
6759        for (auto query : pCB->activeQueries) {
6760            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6761                                DRAWSTATE_INVALID_QUERY, "DS",
6762                                "Ending command buffer with in progress query: queryPool %" PRIu64 ", index %d",
6763                                (uint64_t)(query.pool), query.index);
6764        }
6765    }
6766    if (!skipCall) {
6767        lock.unlock();
6768        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6769        lock.lock();
6770        if (VK_SUCCESS == result) {
6771            pCB->state = CB_RECORDED;
6772            // Reset CB status flags
6773            pCB->status = 0;
6774            printCB(dev_data, commandBuffer);
6775        }
6776    } else {
6777        result = VK_ERROR_VALIDATION_FAILED_EXT;
6778    }
6779    lock.unlock();
6780    return result;
6781}
6782
6783VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
6784vkResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6785    bool skip_call = false;
6786    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6787    std::unique_lock<std::mutex> lock(global_lock);
6788    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6789    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6790    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & dev_data->commandPoolMap[cmdPool].createFlags)) {
6791        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6792                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6793                             "Attempt to reset command buffer (%#" PRIxLEAST64 ") created from command pool (%#" PRIxLEAST64
6794                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6795                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6796    }
6797    skip_call |= checkAndClearCommandBufferInFlight(dev_data, pCB, "reset");
6798    lock.unlock();
6799    if (skip_call)
6800        return VK_ERROR_VALIDATION_FAILED_EXT;
6801    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6802    if (VK_SUCCESS == result) {
6803        lock.lock();
6804        resetCB(dev_data, commandBuffer);
6805        lock.unlock();
6806    }
6807    return result;
6808}
6809
6810#if MTMERGESOURCE
6811// TODO : For any vkCmdBind* calls that include an object which has mem bound to it,
6812//    need to account for that mem now having binding to given commandBuffer
6813#endif
6814VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6815vkCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6816    bool skipCall = false;
6817    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6818    std::unique_lock<std::mutex> lock(global_lock);
6819    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6820    if (pCB) {
6821        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6822        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6823            skipCall |=
6824                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6825                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6826                        "Incorrectly binding compute pipeline (%#" PRIxLEAST64 ") during active RenderPass (%#" PRIxLEAST64 ")",
6827                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass);
6828        }
6829
6830        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6831        if (pPN) {
6832            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6833            set_cb_pso_status(pCB, pPN);
6834            set_pipeline_state(pPN);
6835            skipCall |= validatePipelineState(dev_data, pCB, pipelineBindPoint, pipeline);
6836        } else {
6837            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6838                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6839                                "Attempt to bind Pipeline %#" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6840        }
6841    }
6842    lock.unlock();
6843    if (!skipCall)
6844        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6845}
6846
6847VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6848vkCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6849    bool skipCall = false;
6850    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6851    std::unique_lock<std::mutex> lock(global_lock);
6852    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6853    if (pCB) {
6854        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6855        pCB->status |= CBSTATUS_VIEWPORT_SET;
6856        pCB->viewports.resize(viewportCount);
6857        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6858    }
6859    lock.unlock();
6860    if (!skipCall)
6861        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6862}
6863
6864VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6865vkCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6866    bool skipCall = false;
6867    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6868    std::unique_lock<std::mutex> lock(global_lock);
6869    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6870    if (pCB) {
6871        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6872        pCB->status |= CBSTATUS_SCISSOR_SET;
6873        pCB->scissors.resize(scissorCount);
6874        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6875    }
6876    lock.unlock();
6877    if (!skipCall)
6878        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6879}
6880
6881VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6882    bool skip_call = false;
6883    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6884    std::unique_lock<std::mutex> lock(global_lock);
6885    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6886    if (pCB) {
6887        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6888        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6889
6890        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6891        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6892            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6893                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6894                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
6895                                 "flag. This is undefined behavior and the new line width may be ignored.");
6896        } else {
6897            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6898        }
6899    }
6900    lock.unlock();
6901    if (!skip_call)
6902        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6903}
6904
6905VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6906vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6907    bool skipCall = false;
6908    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6909    std::unique_lock<std::mutex> lock(global_lock);
6910    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6911    if (pCB) {
6912        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6913        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6914    }
6915    lock.unlock();
6916    if (!skipCall)
6917        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6918                                                         depthBiasSlopeFactor);
6919}
6920
6921VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6922    bool skipCall = false;
6923    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6924    std::unique_lock<std::mutex> lock(global_lock);
6925    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6926    if (pCB) {
6927        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6928        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6929    }
6930    lock.unlock();
6931    if (!skipCall)
6932        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6933}
6934
6935VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6936vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6937    bool skipCall = false;
6938    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6939    std::unique_lock<std::mutex> lock(global_lock);
6940    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6941    if (pCB) {
6942        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6943        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6944    }
6945    lock.unlock();
6946    if (!skipCall)
6947        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6948}
6949
6950VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6951vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6952    bool skipCall = false;
6953    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6954    std::unique_lock<std::mutex> lock(global_lock);
6955    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6956    if (pCB) {
6957        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6958        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6959    }
6960    lock.unlock();
6961    if (!skipCall)
6962        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6963}
6964
6965VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6966vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6967    bool skipCall = false;
6968    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6969    std::unique_lock<std::mutex> lock(global_lock);
6970    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6971    if (pCB) {
6972        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6973        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6974    }
6975    lock.unlock();
6976    if (!skipCall)
6977        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6978}
6979
6980VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6981vkCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6982    bool skipCall = false;
6983    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6984    std::unique_lock<std::mutex> lock(global_lock);
6985    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6986    if (pCB) {
6987        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6988        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6989    }
6990    lock.unlock();
6991    if (!skipCall)
6992        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6993}
6994
6995VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
6996vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6997                        uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6998                        const uint32_t *pDynamicOffsets) {
6999    bool skipCall = false;
7000    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7001    std::unique_lock<std::mutex> lock(global_lock);
7002    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7003    if (pCB) {
7004        if (pCB->state == CB_RECORDING) {
7005            // Track total count of dynamic descriptor types to make sure we have an offset for each one
7006            uint32_t totalDynamicDescriptors = 0;
7007            string errorString = "";
7008            uint32_t lastSetIndex = firstSet + setCount - 1;
7009            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size())
7010                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7011            VkDescriptorSet oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7012            for (uint32_t i = 0; i < setCount; i++) {
7013                SET_NODE *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7014                if (pSet) {
7015                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pDescriptorSets[i]);
7016                    pSet->boundCmdBuffers.insert(commandBuffer);
7017                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
7018                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pDescriptorSets[i];
7019                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7020                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7021                                        DRAWSTATE_NONE, "DS", "DS %#" PRIxLEAST64 " bound on pipeline %s",
7022                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7023                    if (!pSet->pUpdateStructs && (pSet->descriptorCount != 0)) {
7024                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7025                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
7026                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7027                                            "DS %#" PRIxLEAST64
7028                                            " bound but it was never updated. You may want to either update it or not bind it.",
7029                                            (uint64_t)pDescriptorSets[i]);
7030                    }
7031                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7032                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
7033                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7034                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7035                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7036                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
7037                                            "at index %u of pipelineLayout %#" PRIxLEAST64 " due to: %s",
7038                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
7039                    }
7040                    if (pSet->p_layout->GetDynamicDescriptorCount()) {
7041                        // First make sure we won't overstep bounds of pDynamicOffsets array
7042                        if ((totalDynamicDescriptors + pSet->p_layout->GetDynamicDescriptorCount()) > dynamicOffsetCount) {
7043                            skipCall |=
7044                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7045                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7046                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7047                                        "descriptorSet #%u (%#" PRIxLEAST64
7048                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7049                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7050                                        i, (uint64_t)pDescriptorSets[i], pSet->p_layout->GetDynamicDescriptorCount(),
7051                                        (dynamicOffsetCount - totalDynamicDescriptors));
7052                        } else { // Validate and store dynamic offsets with the set
7053                            // Validate Dynamic Offset Minimums
7054                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7055                            for (uint32_t d = 0; d < pSet->descriptorCount; d++) {
7056                                if (pSet->p_layout->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7057                                    if (vk_safe_modulo(
7058                                            pDynamicOffsets[cur_dyn_offset],
7059                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7060                                        skipCall |= log_msg(
7061                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7062                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7063                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7064                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7065                                            "device limit minUniformBufferOffsetAlignment %#" PRIxLEAST64,
7066                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7067                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7068                                    }
7069                                    cur_dyn_offset++;
7070                                } else if (pSet->p_layout->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7071                                    if (vk_safe_modulo(
7072                                            pDynamicOffsets[cur_dyn_offset],
7073                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7074                                        skipCall |= log_msg(
7075                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7076                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7077                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7078                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7079                                            "device limit minStorageBufferOffsetAlignment %#" PRIxLEAST64,
7080                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7081                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7082                                    }
7083                                    cur_dyn_offset++;
7084                                }
7085                            }
7086                            // Keep running total of dynamic descriptor count to verify at the end
7087                            totalDynamicDescriptors += pSet->p_layout->GetDynamicDescriptorCount();
7088                        }
7089                    }
7090                } else {
7091                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7092                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7093                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS %#" PRIxLEAST64 " that doesn't exist!",
7094                                        (uint64_t)pDescriptorSets[i]);
7095                }
7096            }
7097            skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7098            // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7099            if (firstSet > 0) { // Check set #s below the first bound set
7100                for (uint32_t i = 0; i < firstSet; ++i) {
7101                    if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7102                        !verify_set_layout_compatibility(
7103                            dev_data, dev_data->setMap[pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i]], layout, i,
7104                            errorString)) {
7105                        skipCall |= log_msg(
7106                            dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7107                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7108                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7109                            "DescriptorSet %#" PRIxLEAST64
7110                            " previously bound as set #%u was disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7111                            (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7112                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7113                    }
7114                }
7115            }
7116            // Check if newly last bound set invalidates any remaining bound sets
7117            if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > lastSetIndex) {
7118                if (oldFinalBoundSet &&
7119                    !verify_set_layout_compatibility(dev_data, dev_data->setMap[oldFinalBoundSet], layout, lastSetIndex,
7120                                                     errorString)) {
7121                    skipCall |=
7122                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7123                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)oldFinalBoundSet, __LINE__,
7124                                DRAWSTATE_NONE, "DS", "DescriptorSet %#" PRIxLEAST64
7125                                                      " previously bound as set #%u is incompatible with set %#" PRIxLEAST64
7126                                                      " newly bound as set #%u so set #%u and any subsequent sets were "
7127                                                      "disturbed by newly bound pipelineLayout (%#" PRIxLEAST64 ")",
7128                                (uint64_t)oldFinalBoundSet, lastSetIndex,
7129                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7130                                lastSetIndex + 1, (uint64_t)layout);
7131                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7132                }
7133            }
7134            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7135            if (totalDynamicDescriptors != dynamicOffsetCount) {
7136                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7137                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7138                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7139                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7140                                    "is %u. It should exactly match the number of dynamic descriptors.",
7141                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
7142            }
7143            // Save dynamicOffsets bound to this CB
7144            for (uint32_t i = 0; i < dynamicOffsetCount; i++) {
7145                pCB->lastBound[pipelineBindPoint].dynamicOffsets.emplace_back(pDynamicOffsets[i]);
7146            }
7147        } else {
7148            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7149        }
7150    }
7151    lock.unlock();
7152    if (!skipCall)
7153        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7154                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7155}
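
// Illustrative sketch (not part of the layer): an application-side call that satisfies the checks
// above. The handles cb/layout/set0/set1 are hypothetical; assume set1's layout contains exactly
// one VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC binding, so exactly one dynamic offset is required
// and it must be a multiple of limits.minUniformBufferOffsetAlignment:
//
//     VkDescriptorSet sets[] = {set0, set1};
//     uint32_t dynamicOffset = 256; // assumes minUniformBufferOffsetAlignment divides 256
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0 /*firstSet*/,
//                             2 /*setCount*/, sets, 1 /*dynamicOffsetCount*/, &dynamicOffset);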
7156
7157VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7158vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7159    bool skipCall = false;
7160    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7161    std::unique_lock<std::mutex> lock(global_lock);
7162#if MTMERGESOURCE
7163    VkDeviceMemory mem;
7164    skipCall =
7165        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7166    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7167    if (cb_data != dev_data->commandBufferMap.end()) {
7168        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
7169        cb_data->second->validate_functions.push_back(function);
7170    }
7171    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7172#endif
7173    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7174    if (pCB) {
7175        skipCall |= addCmd(dev_data, pCB, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7176        VkDeviceSize offset_align = 0;
7177        switch (indexType) {
7178        case VK_INDEX_TYPE_UINT16:
7179            offset_align = 2;
7180            break;
7181        case VK_INDEX_TYPE_UINT32:
7182            offset_align = 4;
7183            break;
7184        default:
7185            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
7186            break;
7187        }
7188        if (!offset_align || (offset % offset_align)) {
7189            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7190                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7191                                "vkCmdBindIndexBuffer() offset (%#" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
7192                                offset, string_VkIndexType(indexType));
7193        }
7194        pCB->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7195    }
7196    lock.unlock();
7197    if (!skipCall)
7198        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7199}
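
// Illustrative sketch of the alignment rule enforced above (cb/indexBuffer are hypothetical
// app-side handles): a VK_INDEX_TYPE_UINT16 offset must be 2-byte aligned and a
// VK_INDEX_TYPE_UINT32 offset 4-byte aligned:
//
//     vkCmdBindIndexBuffer(cb, indexBuffer, 6, VK_INDEX_TYPE_UINT16); // OK: 6 % 2 == 0
//     vkCmdBindIndexBuffer(cb, indexBuffer, 6, VK_INDEX_TYPE_UINT32); // flagged: 6 % 4 != 0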
7200
7201void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7202    uint32_t end = firstBinding + bindingCount;
7203    if (pCB->currentDrawData.buffers.size() < end) {
7204        pCB->currentDrawData.buffers.resize(end);
7205    }
7206    for (uint32_t i = 0; i < bindingCount; ++i) {
7207        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7208    }
7209}
7210
7211static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7212
7213VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7214                                                                  uint32_t bindingCount, const VkBuffer *pBuffers,
7215                                                                  const VkDeviceSize *pOffsets) {
7216    bool skipCall = false;
7217    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7218    std::unique_lock<std::mutex> lock(global_lock);
7219#if MTMERGESOURCE
7220    for (uint32_t i = 0; i < bindingCount; ++i) {
7221        VkDeviceMemory mem;
7222        skipCall |= get_mem_binding_from_object(dev_data, (uint64_t)pBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7223        auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7224        if (cb_data != dev_data->commandBufferMap.end()) {
7225            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
7226            cb_data->second->validate_functions.push_back(function);
7227        }
7228    }
7229    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7230#endif
7231    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7232    if (pCB) {
7233        skipCall |= addCmd(dev_data, pCB, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7234        updateResourceTracking(pCB, firstBinding, bindingCount, pBuffers);
7235    } else {
7236        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7237    }
7238    lock.unlock();
7239    if (!skipCall)
7240        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7241}
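
// Illustrative sketch (hypothetical handles): binding two vertex buffers starting at binding 1.
// updateResourceTracking() above then grows currentDrawData.buffers to size 3 and records the
// buffers at indices 1 and 2, so the next draw snapshots exactly what is bound:
//
//     VkBuffer bufs[] = {posBuf, colorBuf};
//     VkDeviceSize offs[] = {0, 0};
//     vkCmdBindVertexBuffers(cb, 1 /*firstBinding*/, 2 /*bindingCount*/, bufs, offs);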
7242
7243/* expects global_lock to be held by caller */
7244static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7245    bool skip_call = false;
7246
7247    for (auto imageView : pCB->updateImages) {
7248        auto iv_data = dev_data->imageViewMap.find(imageView);
7249        if (iv_data == dev_data->imageViewMap.end())
7250            continue;
7251        VkImage image = iv_data->second.image;
7252        VkDeviceMemory mem;
7253        skip_call |=
7254            get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7255        std::function<bool()> function = [=]() {
7256            set_memory_valid(dev_data, mem, true, image);
7257            return false;
7258        };
7259        pCB->validate_functions.push_back(function);
7260    }
7261    for (auto buffer : pCB->updateBuffers) {
7262        VkDeviceMemory mem;
7263        skip_call |= get_mem_binding_from_object(dev_data, (uint64_t)buffer,
7264                                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7265        std::function<bool()> function = [=]() {
7266            set_memory_valid(dev_data, mem, true);
7267            return false;
7268        };
7269        pCB->validate_functions.push_back(function);
7270    }
7271    return skip_call;
7272}
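
// A minimal sketch of the deferred-validation idiom used above and throughout this file: checks
// that depend on submit-time state are captured now as std::function<bool()> closures and replayed
// at queue submission, where returning true reports a failure. Names below are illustrative only.
//
//     std::vector<std::function<bool()>> validate_functions;
//     validate_functions.push_back([=]() {
//         set_memory_valid(dev_data, mem, true, image); // mark the image's memory written
//         return false;                                 // nothing to report at submit time
//     });
//     bool skip = false;
//     for (auto &fn : validate_functions)
//         skip |= fn(); // run during vkQueueSubmit() validation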
7273
7274VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7275                                                     uint32_t firstVertex, uint32_t firstInstance) {
7276    bool skipCall = false;
7277    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7278    std::unique_lock<std::mutex> lock(global_lock);
7279    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7280    if (pCB) {
7281        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7282        pCB->drawCount[DRAW]++;
7283        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
7284        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7285        // TODO : Need to pass commandBuffer as srcObj here
7286        skipCall |=
7287            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7288                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW]++);
7289        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7290        if (!skipCall) {
7291            updateResourceTrackingOnDraw(pCB);
7292        }
7293        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7294    }
7295    lock.unlock();
7296    if (!skipCall)
7297        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7298}
7299
7300VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7301                                                            uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7302                                                            uint32_t firstInstance) {
7303    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7304    bool skipCall = false;
7305    std::unique_lock<std::mutex> lock(global_lock);
7306    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7307    if (pCB) {
7308        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7309        pCB->drawCount[DRAW_INDEXED]++;
7310        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
7311        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7312        // TODO : Need to pass commandBuffer as srcObj here
7313        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7314                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7315                            "vkCmdDrawIndexed() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7316        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7317        if (!skipCall) {
7318            updateResourceTrackingOnDraw(pCB);
7319        }
7320        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7321    }
7322    lock.unlock();
7323    if (!skipCall)
7324        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7325                                                        firstInstance);
7326}
7327
7328VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7329vkCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7330    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7331    bool skipCall = false;
7332    std::unique_lock<std::mutex> lock(global_lock);
7333#if MTMERGESOURCE
7334    VkDeviceMemory mem;
7335    // MTMTODO : merge with code below
7336    skipCall =
7337        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7338    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
7339#endif
7340    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7341    if (pCB) {
7342        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7343        pCB->drawCount[DRAW_INDIRECT]++;
7344        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
7345        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7346        // TODO : Need to pass commandBuffer as srcObj here
7347        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7348                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7349                            "vkCmdDrawIndirect() call #%" PRIu64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7350        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7351        if (!skipCall) {
7352            updateResourceTrackingOnDraw(pCB);
7353        }
7354        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
7355    }
7356    lock.unlock();
7357    if (!skipCall)
7358        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7359}
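
// Illustrative sketch (hypothetical handles): the buffer consumed above holds packed
// VkDrawIndirectCommand records. When count > 1, the spec additionally requires stride to be a
// multiple of 4 and at least sizeof(VkDrawIndirectCommand):
//
//     vkCmdDrawIndirect(cb, indirectBuf, 0 /*offset*/, 1 /*count*/, sizeof(VkDrawIndirectCommand));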
7360
7361VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7362vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7363    bool skipCall = false;
7364    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7365    std::unique_lock<std::mutex> lock(global_lock);
7366#if MTMERGESOURCE
7367    VkDeviceMemory mem;
7368    // MTMTODO : merge with code below
7369    skipCall =
7370        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7371    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
7372#endif
7373    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7374    if (pCB) {
7375        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7376        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
7377        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
7378        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7379        // TODO : Need to pass commandBuffer as srcObj here
7380        skipCall |=
7381            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7382                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call #%" PRIu64 ", reporting DS state:",
7383                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7384        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
7385        if (!skipCall) {
7386            updateResourceTrackingOnDraw(pCB);
7387        }
7388        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
7389    }
7390    lock.unlock();
7391    if (!skipCall)
7392        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7393}
7394
7395VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7396    bool skipCall = false;
7397    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7398    std::unique_lock<std::mutex> lock(global_lock);
7399    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7400    if (pCB) {
7401        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
7402        // TODO : Call below is temporary until call above can be re-enabled
7403        update_shader_storage_images_and_buffers(dev_data, pCB);
7404        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7405        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7406        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7407    }
7408    lock.unlock();
7409    if (!skipCall)
7410        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7411}
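
// Illustrative sketch (cb is hypothetical): unlike draws, dispatches must be recorded outside a
// render pass instance, which is what the insideRenderPass() check above reports on:
//
//     vkCmdEndRenderPass(cb);      // leave the render pass first
//     vkCmdDispatch(cb, 64, 1, 1); // legal: outside any render pass instance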
7412
7413VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7414vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7415    bool skipCall = false;
7416    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7417    std::unique_lock<std::mutex> lock(global_lock);
7418#if MTMERGESOURCE
7419    VkDeviceMemory mem;
7420    skipCall =
7421        get_mem_binding_from_object(dev_data, (uint64_t)buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7422    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7423#endif
7424    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7425    if (pCB) {
7426        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
7427        // TODO : Call below is temporary until call above can be re-enabled
7428        update_shader_storage_images_and_buffers(dev_data, pCB);
7429        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7430        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7431        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7432    }
7433    lock.unlock();
7434    if (!skipCall)
7435        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7436}
7437
7438VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7439                                                           uint32_t regionCount, const VkBufferCopy *pRegions) {
7440    bool skipCall = false;
7441    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7442    std::unique_lock<std::mutex> lock(global_lock);
7443#if MTMERGESOURCE
7444    VkDeviceMemory mem;
7445    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7446    skipCall =
7447        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7448    if (cb_data != dev_data->commandBufferMap.end()) {
7449        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBuffer()"); };
7450        cb_data->second->validate_functions.push_back(function);
7451    }
7452    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7453    skipCall |=
7454        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7455    if (cb_data != dev_data->commandBufferMap.end()) {
7456        std::function<bool()> function = [=]() {
7457            set_memory_valid(dev_data, mem, true);
7458            return false;
7459        };
7460        cb_data->second->validate_functions.push_back(function);
7461    }
7462    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBuffer");
7463    // Validate that SRC & DST buffers have correct usage flags set
7464    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7465                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7466    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7467                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7468#endif
7469    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7470    if (pCB) {
7471        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7472        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBuffer");
7473    }
7474    lock.unlock();
7475    if (!skipCall)
7476        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7477}
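
// Illustrative sketch (fields abbreviated): the usage-flag checks above require each buffer to
// have been created with the matching transfer usage bit. A buffer created as below could be a
// valid srcBuffer but would be flagged if used as the dstBuffer of vkCmdCopyBuffer():
//
//     VkBufferCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     ci.size  = 4096;
//     ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;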
7478
7479static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7480                                    VkImageLayout srcImageLayout) {
7481    bool skip_call = false;
7482
7483    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7484    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7485    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7486        uint32_t layer = i + subLayers.baseArrayLayer;
7487        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7488        IMAGE_CMD_BUF_LAYOUT_NODE node;
7489        if (!FindLayout(pCB, srcImage, sub, node)) {
7490            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7491            continue;
7492        }
7493        if (node.layout != srcImageLayout) {
7494            // TODO: Improve log message in the next pass
7495            skip_call |=
7496                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7497                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose specified source "
7498                                                                        "layout (%s) does not match its current layout (%s).",
7499                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7500        }
7501    }
7502    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7503        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7504            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7505            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7506                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7507                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7508        } else {
7509            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7510                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7511                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7512                                 string_VkImageLayout(srcImageLayout));
7513        }
7514    }
7515    return skip_call;
7516}
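
// Illustrative sketch (handles hypothetical, fields abbreviated): to satisfy the check above, the
// app transitions the image to TRANSFER_SRC_OPTIMAL with a barrier before the copy, or uses
// VK_IMAGE_LAYOUT_GENERAL and accepts the performance warning:
//
//     VkImageMemoryBarrier b = {};
//     b.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     b.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     b.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     b.image = srcImage;
//     b.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &b);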
7517
7518static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7519                                  VkImageLayout destImageLayout) {
7520    bool skip_call = false;
7521
7522    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7523    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7524    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7525        uint32_t layer = i + subLayers.baseArrayLayer;
7526        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7527        IMAGE_CMD_BUF_LAYOUT_NODE node;
7528        if (!FindLayout(pCB, destImage, sub, node)) {
7529            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7530            continue;
7531        }
7532        if (node.layout != destImageLayout) {
7533            skip_call |=
7534                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7535                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose specified destination "
7536                                                                        "layout (%s) does not match its current layout (%s).",
7537                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7538        }
7539    }
7540    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7541        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7542            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7543            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7544                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7545                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7546        } else {
7547            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7548                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7549                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7550                                 string_VkImageLayout(destImageLayout));
7551        }
7552    }
7553    return skip_call;
7554}
7555
7556VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7557vkCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7558               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7559    bool skipCall = false;
7560    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7561    std::unique_lock<std::mutex> lock(global_lock);
7562#if MTMERGESOURCE
7563    VkDeviceMemory mem;
7564    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7565    // Validate that src & dst images have correct usage flags set
7566    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7567    if (cb_data != dev_data->commandBufferMap.end()) {
7568        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImage()", srcImage); };
7569        cb_data->second->validate_functions.push_back(function);
7570    }
7571    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7572    skipCall |=
7573        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7574    if (cb_data != dev_data->commandBufferMap.end()) {
7575        std::function<bool()> function = [=]() {
7576            set_memory_valid(dev_data, mem, true, dstImage);
7577            return false;
7578        };
7579        cb_data->second->validate_functions.push_back(function);
7580    }
7581    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImage");
7582    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7583                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7584    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7585                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7586#endif
7587    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7588    if (pCB) {
7589        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGE, "vkCmdCopyImage()");
7590        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImage");
7591        for (uint32_t i = 0; i < regionCount; ++i) {
7592            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7593            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7594        }
7595    }
7596    lock.unlock();
7597    if (!skipCall)
7598        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7599                                                      regionCount, pRegions);
7600}
7601
7602VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7603vkCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7604               VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7605    bool skipCall = false;
7606    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7607    std::unique_lock<std::mutex> lock(global_lock);
7608#if MTMERGESOURCE
7609    VkDeviceMemory mem;
7610    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7611    // Validate that src & dst images have correct usage flags set
7612    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7613    if (cb_data != dev_data->commandBufferMap.end()) {
7614        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBlitImage()", srcImage); };
7615        cb_data->second->validate_functions.push_back(function);
7616    }
7617    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7618    skipCall |=
7619        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7620    if (cb_data != dev_data->commandBufferMap.end()) {
7621        std::function<bool()> function = [=]() {
7622            set_memory_valid(dev_data, mem, true, dstImage);
7623            return false;
7624        };
7625        cb_data->second->validate_functions.push_back(function);
7626    }
7627    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdBlitImage");
7628    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7629                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7630    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7631                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7632#endif
7633    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7634    if (pCB) {
7635        skipCall |= addCmd(dev_data, pCB, CMD_BLITIMAGE, "vkCmdBlitImage()");
7636        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBlitImage");
7637    }
7638    lock.unlock();
7639    if (!skipCall)
7640        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7641                                                      regionCount, pRegions, filter);
7642}
7643
7644VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7645                                                                  VkImage dstImage, VkImageLayout dstImageLayout,
7646                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7647    bool skipCall = false;
7648    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7649    std::unique_lock<std::mutex> lock(global_lock);
7650#if MTMERGESOURCE
7651    VkDeviceMemory mem;
7652    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7653    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7654    if (cb_data != dev_data->commandBufferMap.end()) {
7655        std::function<bool()> function = [=]() {
7656            set_memory_valid(dev_data, mem, true, dstImage);
7657            return false;
7658        };
7659        cb_data->second->validate_functions.push_back(function);
7660    }
7661    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
7662    skipCall |=
7663        get_mem_binding_from_object(dev_data, (uint64_t)srcBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7664    if (cb_data != dev_data->commandBufferMap.end()) {
7665        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdCopyBufferToImage()"); };
7666        cb_data->second->validate_functions.push_back(function);
7667    }
7668    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyBufferToImage");
7669    // Validate that src buff & dst image have correct usage flags set
7670    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7671                                            "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7672    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7673                                           "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7674#endif
7675    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7676    if (pCB) {
7677        skipCall |= addCmd(dev_data, pCB, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7678        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyBufferToImage");
7679        for (uint32_t i = 0; i < regionCount; ++i) {
7680            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7681        }
7682    }
7683    lock.unlock();
7684    if (!skipCall)
7685        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7686                                                              pRegions);
7687}
7688
7689VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7690                                                                  VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7691                                                                  uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7692    bool skipCall = false;
7693    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7694    std::unique_lock<std::mutex> lock(global_lock);
7695#if MTMERGESOURCE
7696    VkDeviceMemory mem;
7697    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7698    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7699    if (cb_data != dev_data->commandBufferMap.end()) {
7700        std::function<bool()> function = [=]() {
7701            return validate_memory_is_valid(dev_data, mem, "vkCmdCopyImageToBuffer()", srcImage);
7702        };
7703        cb_data->second->validate_functions.push_back(function);
7704    }
7705    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
7706    skipCall |=
7707        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7708    if (cb_data != dev_data->commandBufferMap.end()) {
7709        std::function<bool()> function = [=]() {
7710            set_memory_valid(dev_data, mem, true);
7711            return false;
7712        };
7713        cb_data->second->validate_functions.push_back(function);
7714    }
7715    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyImageToBuffer");
7716    // Validate that dst buff & src image have correct usage flags set
7717    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7718                                           "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7719    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7720                                            "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7721#endif
7722    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7723    if (pCB) {
7724        skipCall |= addCmd(dev_data, pCB, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7725        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyImageToBuffer");
7726        for (uint32_t i = 0; i < regionCount; ++i) {
7727            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7728        }
7729    }
7730    lock.unlock();
7731    if (!skipCall)
7732        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7733                                                              pRegions);
7734}
7735
7736VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7737                                                             VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7738    bool skipCall = false;
7739    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7740    std::unique_lock<std::mutex> lock(global_lock);
7741#if MTMERGESOURCE
7742    VkDeviceMemory mem;
7743    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7744    skipCall =
7745        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7746    if (cb_data != dev_data->commandBufferMap.end()) {
7747        std::function<bool()> function = [=]() {
7748            set_memory_valid(dev_data, mem, true);
7749            return false;
7750        };
7751        cb_data->second->validate_functions.push_back(function);
7752    }
7753    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7754    // Validate that dst buff has correct usage flags set
7755    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7756                                            "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7757#endif
7758    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7759    if (pCB) {
7760        skipCall |= addCmd(dev_data, pCB, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7761        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdUpdateBuffer");
7762    }
7763    lock.unlock();
7764    if (!skipCall)
7765        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7766}
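
// Illustrative sketch (hypothetical handles): vkCmdUpdateBuffer() is intended for small inline
// updates; the spec requires dstOffset and dataSize to be multiples of 4 and caps dataSize at
// 65536 bytes:
//
//     const uint32_t constants[4] = {0, 1, 2, 3};
//     vkCmdUpdateBuffer(cb, uniformBuf, 0 /*dstOffset*/, sizeof(constants), constants);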
7767
7768VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7769vkCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7770    bool skipCall = false;
7771    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7772    std::unique_lock<std::mutex> lock(global_lock);
7773#if MTMERGESOURCE
7774    VkDeviceMemory mem;
7775    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7776    skipCall =
7777        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
7778    if (cb_data != dev_data->commandBufferMap.end()) {
7779        std::function<bool()> function = [=]() {
7780            set_memory_valid(dev_data, mem, true);
7781            return false;
7782        };
7783        cb_data->second->validate_functions.push_back(function);
7784    }
7785    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7786    // Validate that dst buff has correct usage flags set
7787    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7788                                            "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7789#endif
7790    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7791    if (pCB) {
7792        skipCall |= addCmd(dev_data, pCB, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7793        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdFillBuffer");
7794    }
7795    lock.unlock();
7796    if (!skipCall)
7797        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7798}
7799
7800VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7801                                                                 const VkClearAttachment *pAttachments, uint32_t rectCount,
7802                                                                 const VkClearRect *pRects) {
7803    bool skipCall = false;
7804    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7805    std::unique_lock<std::mutex> lock(global_lock);
7806    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7807    if (pCB) {
7808        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7809        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7810        if (!hasDrawCmd(pCB) && (rectCount > 0) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7811            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7812            // TODO : commandBuffer should be srcObj
7813            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
7814            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
7815            // call CmdClearAttachments
7816            // Otherwise this seems more like a performance warning.
7817            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7818                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7819                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7820                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7821                                (uint64_t)(commandBuffer));
7822        }
7823        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7824    }
7825
7826    // Validate that attachment is in reference list of active subpass
7827    if (pCB && pCB->activeRenderPass) {
7828        const VkRenderPassCreateInfo *pRPCI = dev_data->renderPassMap[pCB->activeRenderPass]->pCreateInfo;
7829        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7830
7831        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7832            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7833            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7834                bool found = false;
7835                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7836                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7837                        found = true;
7838                        break;
7839                    }
7840                }
7841                if (!found) {
7842                    skipCall |= log_msg(
7843                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7844                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7845                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7846                        attachment->colorAttachment, pCB->activeSubpass);
7847                }
7848            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
7849                if (!pSD->pDepthStencilAttachment || // No depth/stencil attachment in the active subpass
7850                    (pSD->pDepthStencilAttachment->attachment ==
7851                     VK_ATTACHMENT_UNUSED)) {
7852
7853                    skipCall |= log_msg(
7854                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7855                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7856                        "vkCmdClearAttachments() depth/stencil clear of attachment %d requested, but the active subpass's "
7857                        "depthStencilAttachment.attachment (%d) indicates no usable depth/stencil attachment in subpass %d",
7858                        attachment->colorAttachment,
7859                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7860                        pCB->activeSubpass);
7861                }
7862            }
7863        }
7864    }
7865    lock.unlock();
7866    if (!skipCall)
7867        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7868}
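
// Illustrative sketch (fields abbreviated): the preferred alternative named in the warning above
// is to clear via the attachment description, so the clear happens at render pass begin:
//
//     VkAttachmentDescription att = {};
//     att.loadOp  = VK_ATTACHMENT_LOAD_OP_CLEAR; // cleared when the render pass begins
//     att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
//     // ...and supply the clear value via VkRenderPassBeginInfo::pClearValues.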
7869
7870VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7871                                                                VkImageLayout imageLayout, const VkClearColorValue *pColor,
7872                                                                uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7873    bool skipCall = false;
7874    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7875    std::unique_lock<std::mutex> lock(global_lock);
7876#if MTMERGESOURCE
7877    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7878    VkDeviceMemory mem;
7879    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7880    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7881    if (cb_data != dev_data->commandBufferMap.end()) {
7882        std::function<bool()> function = [=]() {
7883            set_memory_valid(dev_data, mem, true, image);
7884            return false;
7885        };
7886        cb_data->second->validate_functions.push_back(function);
7887    }
7888    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7889#endif
7890    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7891    if (pCB) {
7892        skipCall |= addCmd(dev_data, pCB, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7893        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearColorImage");
7894    }
7895    lock.unlock();
7896    if (!skipCall)
7897        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7898}
7899
7900VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7901vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7902                            const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7903                            const VkImageSubresourceRange *pRanges) {
7904    bool skipCall = false;
7905    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7906    std::unique_lock<std::mutex> lock(global_lock);
7907#if MTMERGESOURCE
7908    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7909    VkDeviceMemory mem;
7910    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7911    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7912    if (cb_data != dev_data->commandBufferMap.end()) {
7913        std::function<bool()> function = [=]() {
7914            set_memory_valid(dev_data, mem, true, image);
7915            return false;
7916        };
7917        cb_data->second->validate_functions.push_back(function);
7918    }
7919    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7920#endif
7921    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7922    if (pCB) {
7923        skipCall |= addCmd(dev_data, pCB, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7924        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdClearDepthStencilImage");
7925    }
7926    lock.unlock();
7927    if (!skipCall)
7928        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7929                                                                   pRanges);
7930}
7931
7932VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7933vkCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7934                  VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7935    bool skipCall = false;
7936    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7937    std::unique_lock<std::mutex> lock(global_lock);
7938#if MTMERGESOURCE
7939    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
7940    VkDeviceMemory mem;
7941    skipCall = get_mem_binding_from_object(dev_data, (uint64_t)srcImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7942    if (cb_data != dev_data->commandBufferMap.end()) {
7943        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdResolveImage()", srcImage); };
7944        cb_data->second->validate_functions.push_back(function);
7945    }
7946    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
7947    skipCall |=
7948        get_mem_binding_from_object(dev_data, (uint64_t)dstImage, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
7949    if (cb_data != dev_data->commandBufferMap.end()) {
7950        std::function<bool()> function = [=]() {
7951            set_memory_valid(dev_data, mem, true, dstImage);
7952            return false;
7953        };
7954        cb_data->second->validate_functions.push_back(function);
7955    }
7956    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdResolveImage");
7957#endif
7958    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7959    if (pCB) {
7960        skipCall |= addCmd(dev_data, pCB, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7961        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResolveImage");
7962    }
7963    lock.unlock();
7964    if (!skipCall)
7965        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7966                                                         regionCount, pRegions);
7967}
7968
7969bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7970    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7971    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7972    if (pCB) {
7973        pCB->eventToStageMap[event] = stageMask;
7974    }
7975    auto queue_data = dev_data->queueMap.find(queue);
7976    if (queue_data != dev_data->queueMap.end()) {
7977        queue_data->second.eventToStageMap[event] = stageMask;
7978    }
7979    return false;
7980}
7981
7982VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
7983vkCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7984    bool skipCall = false;
7985    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7986    std::unique_lock<std::mutex> lock(global_lock);
7987    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7988    if (pCB) {
7989        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7990        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7991        pCB->events.push_back(event);
7992        std::function<bool(VkQueue)> eventUpdate =
7993            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7994        pCB->eventUpdates.push_back(eventUpdate);
7995    }
7996    lock.unlock();
7997    if (!skipCall)
7998        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7999}
8000
8001VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8002vkCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8003    bool skipCall = false;
8004    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8005    std::unique_lock<std::mutex> lock(global_lock);
8006    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8007    if (pCB) {
8008        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8009        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
8010        pCB->events.push_back(event);
8011        std::function<bool(VkQueue)> eventUpdate =
8012            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8013        pCB->eventUpdates.push_back(eventUpdate);
8014    }
8015    lock.unlock();
8016    if (!skipCall)
8017        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8018}
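// Note: the reset is recorded by reusing setEventStageMask with a stageMask of 0, so until the
// event is set again, a later vkCmdWaitEvents on it contributes nothing to the expected
// srcStageMask computed at submit time.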
8019
8020static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8021                                   const VkImageMemoryBarrier *pImgMemBarriers) {
8022    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8023    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8024    bool skip = false;
8025    uint32_t levelCount = 0;
8026    uint32_t layerCount = 0;
8027
8028    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8029        auto mem_barrier = &pImgMemBarriers[i];
8030        if (!mem_barrier)
8031            continue;
8032        // TODO: Do not iterate over every possibility - consolidate where
8033        // possible
8034        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8035
8036        for (uint32_t j = 0; j < levelCount; j++) {
8037            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8038            for (uint32_t k = 0; k < layerCount; k++) {
8039                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8040                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8041                IMAGE_CMD_BUF_LAYOUT_NODE node;
8042                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8043                    SetLayout(pCB, mem_barrier->image, sub,
8044                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8045                    continue;
8046                }
8047                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8048                    // TODO: Set memory invalid which is in mem_tracker currently
8049                } else if (node.layout != mem_barrier->oldLayout) {
8050                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8051                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8052                                                                                    "when current layout is %s.",
8053                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8054                }
8055                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8056            }
8057        }
8058    }
8059    return skip;
8060}
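// Illustrative walk-through with hypothetical values: for an image created with 4 mip levels and
// a single array layer, a barrier whose subresourceRange is
//
//     {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*baseMipLevel*/, VK_REMAINING_MIP_LEVELS, 0 /*baseArrayLayer*/, 1}
//
// is resolved by ResolveRemainingLevelsLayers() to levelCount = 4, layerCount = 1, and the loops
// above then record the old/new layout for each (mip level, array layer) pair individually, so
// later commands can validate every subresource's layout on its own.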
8061
8062// Print readable FlagBits in FlagMask
8063static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8064    std::string result;
8065    std::string separator;
8066
8067    if (accessMask == 0) {
8068        result = "[None]";
8069    } else {
8070        result = "[";
8071        for (uint32_t i = 0; i < 32; i++) {
8072            if (accessMask & (1u << i)) {
8073                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8074                separator = " | ";
8075            }
8076        }
8077        result = result + "]";
8078    }
8079    return result;
8080}
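// For example, an accessMask of (VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT) is rendered
// as "[VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT]" (bits are emitted in increasing
// bit-index order), and an accessMask of 0 is rendered as "[None]".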
8081
8082// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8083// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8084// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8085static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8086                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8087                             const char *type) {
8088    bool skip_call = false;
8089
8090    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8091        if (accessMask & ~(required_bit | optional_bits)) {
8092            // TODO: Verify against Valid Use
8093            skip_call |=
8094                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8095                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8096                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8097        }
8098    } else {
8099        if (!required_bit) {
8100            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8101                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8102                                                                  "%s when layout is %s, unless the app has previously added a "
8103                                                                  "barrier for this transition.",
8104                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8105                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8106        } else {
8107            std::string opt_bits;
8108            if (optional_bits != 0) {
8109                std::stringstream ss;
8110                ss << optional_bits;
8111                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8112            }
8113            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8114                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8115                                                                  "layout is %s, unless the app has previously added a barrier for "
8116                                                                  "this transition.",
8117                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8118                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8119        }
8120    }
8121    return skip_call;
8122}
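// Illustrative outcomes for layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, where required_bit is
// VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT and optional_bits is VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
//
//     accessMask = WRITE | READ           -> silent (required bit present, no stray bits)
//     accessMask = READ                   -> warning: the required access bit is missing
//     accessMask = WRITE | TRANSFER_READ  -> warning: additional bits beyond required/optional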
8123
8124static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8125                                        const VkImageLayout &layout, const char *type) {
8126    bool skip_call = false;
8127    switch (layout) {
8128    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8129        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8130                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8131        break;
8132    }
8133    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8134        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8135                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8136        break;
8137    }
8138    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8139        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8140        break;
8141    }
8142    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8143        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8144        break;
8145    }
8146    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8147        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8148                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8149        break;
8150    }
8151    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8152        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8153                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8154        break;
8155    }
8156    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8157        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8158        break;
8159    }
8160    case VK_IMAGE_LAYOUT_UNDEFINED: {
8161        if (accessMask != 0) {
8162            // TODO: Verify against Valid Use section spec
8163            skip_call |=
8164                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8165                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask %d %s are specified when layout is %s.",
8166                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8167        }
8168        break;
8169    }
8170    case VK_IMAGE_LAYOUT_GENERAL:
8171    default: { break; }
8172    }
8173    return skip_call;
8174}
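// A barrier that passes these checks without warnings, sketched with hypothetical values:
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;             // UNDEFINED expects srcAccessMask == 0
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;  // TRANSFER_DST requires TRANSFER_WRITE
//     barrier.srcAccessMask = 0;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//
// ValidateBarriers() below routes each image barrier's (srcAccessMask, oldLayout) and
// (dstAccessMask, newLayout) pairs through this function.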
8175
8176static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8177                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8178                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8179                             const VkImageMemoryBarrier *pImageMemBarriers) {
8180    bool skip_call = false;
8181    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8182    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8183    if (pCB->activeRenderPass && memBarrierCount) {
8184        if (!dev_data->renderPassMap[pCB->activeRenderPass]->hasSelfDependency[pCB->activeSubpass]) {
8185            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8186                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8187                                                                  "with no self-dependency specified.",
8188                                 funcName, pCB->activeSubpass);
8189        }
8190    }
8191    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8192        auto mem_barrier = &pImageMemBarriers[i];
8193        auto image_data = dev_data->imageMap.find(mem_barrier->image);
8194        if (image_data != dev_data->imageMap.end()) {
8195            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8196            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8197            if (image_data->second.createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8198                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8199                // be VK_QUEUE_FAMILY_IGNORED
8200                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8201                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8202                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8203                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8204                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
8205                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8206                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8207                }
8208            } else {
8209                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8210                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8211                // or both be a valid queue family
8212                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8213                    (src_q_f_index != dst_q_f_index)) {
8214                    skip_call |=
8215                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8216                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8217                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8218                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8219                                                                     "must be.",
8220                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8221                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8222                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8223                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8224                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8225                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8226                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8227                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8228                                         " or dstQueueFamilyIndex %d is greater than or equal to the number ("
8229                                         PRINTF_SIZE_T_SPECIFIER ") of queueFamilies created for this device.",
8230                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8231                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
8232                }
8233            }
8234        }
8235
8236        if (mem_barrier) {
8237            skip_call |=
8238                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8239            skip_call |=
8240                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8241            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8242                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8243                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8244                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
8245                                     funcName);
8246            }
8247            auto image_data = dev_data->imageMap.find(mem_barrier->image);
8248            VkFormat format = VK_FORMAT_UNDEFINED;
8249            uint32_t arrayLayers = 0, mipLevels = 0;
8250            bool imageFound = false;
8251            if (image_data != dev_data->imageMap.end()) {
8252                format = image_data->second.createInfo.format;
8253                arrayLayers = image_data->second.createInfo.arrayLayers;
8254                mipLevels = image_data->second.createInfo.mipLevels;
8255                imageFound = true;
8256            } else if (dev_data->device_extensions.wsi_enabled) {
8257                auto imageswap_data = dev_data->device_extensions.imageToSwapchainMap.find(mem_barrier->image);
8258                if (imageswap_data != dev_data->device_extensions.imageToSwapchainMap.end()) {
8259                    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(imageswap_data->second);
8260                    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
8261                        format = swapchain_data->second->createInfo.imageFormat;
8262                        arrayLayers = swapchain_data->second->createInfo.imageArrayLayers;
8263                        mipLevels = 1;
8264                        imageFound = true;
8265                    }
8266                }
8267            }
8268            if (imageFound) {
8269                if (vk_format_is_depth_and_stencil(format) &&
8270                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
8271                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
8272                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8273                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8274                                         "%s: Image is a depth and stencil format and thus must have both "
8275                                         "VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
8276                                         funcName);
8277                }
8278                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8279                                     ? 1
8280                                     : mem_barrier->subresourceRange.layerCount;
8281                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8282                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8283                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8284                                         "%s: Subresource range: the sum of baseArrayLayer (%d) and layerCount (%d) must be "
8285                                         "less than or equal to the total number of layers (%d).",
8286                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
8287                            arrayLayers);
8288                }
8289                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8290                                     ? 1
8291                                     : mem_barrier->subresourceRange.levelCount;
8292                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8293                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8294                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8295                                         "%s: Subresource range: the sum of baseMipLevel (%d) and levelCount (%d) must be "
8296                                         "less than or equal to the total number of levels (%d).",
8297                            funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
8298                            mipLevels);
8299                }
8300            }
8301        }
8302    }
8303    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8304        auto mem_barrier = &pBufferMemBarriers[i];
8305        if (pCB->activeRenderPass) {
8306            skip_call |=
8307                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8308                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8309        }
8310        if (!mem_barrier)
8311            continue;
8312
8313        // Validate buffer barrier queue family indices
8314        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8315             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8316            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8317             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8318            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8319                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8320                                 "%s: Buffer Barrier 0x%" PRIx64 " has a QueueFamilyIndex greater than or equal "
8321                                 "to the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8322                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8323                                 dev_data->phys_dev_properties.queue_family_properties.size());
8324        }
8325
8326        auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
8327        if (buffer_data != dev_data->bufferMap.end()) {
8328            VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
8329                                           ? buffer_data->second.createInfo.size
8330                                           : 0;
8331            if (mem_barrier->offset >= buffer_size) {
8332                skip_call |= log_msg(
8333                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8334                    DRAWSTATE_INVALID_BARRIER, "DS",
8335                    "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " which is not less than total size %" PRIu64 ".",
8336                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8337                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
8338            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8339                skip_call |= log_msg(
8340                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8341                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset %" PRIu64 " and size %" PRIu64
8342                                                     " whose sum is greater than total size %" PRIu64 ".",
8343                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8344                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8345                    reinterpret_cast<const uint64_t &>(buffer_size));
8346            }
8347        }
8348    }
8349    return skip_call;
8350}
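// Illustrative buffer-barrier outcomes for a 256-byte buffer (hypothetical sizes):
//
//     offset = 0,   size = VK_WHOLE_SIZE  -> valid
//     offset = 256, size = anything       -> error: offset is not less than the buffer size
//     offset = 128, size = 192            -> error: offset + size (320) exceeds the buffer size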
8351
8352bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
8353    bool skip_call = false;
8354    VkPipelineStageFlags stageMask = 0;
8355    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8356    for (uint32_t i = 0; i < eventCount; ++i) {
8357        auto event = pCB->events[firstEventIndex + i];
8358        auto queue_data = dev_data->queueMap.find(queue);
8359        if (queue_data == dev_data->queueMap.end())
8360            return false;
8361        auto event_data = queue_data->second.eventToStageMap.find(event);
8362        if (event_data != queue_data->second.eventToStageMap.end()) {
8363            stageMask |= event_data->second;
8364        } else {
8365            auto global_event_data = dev_data->eventMap.find(event);
8366            if (global_event_data == dev_data->eventMap.end()) {
8367                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8368                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8369                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8370                                     reinterpret_cast<const uint64_t &>(event));
8371            } else {
8372                stageMask |= global_event_data->second.stageMask;
8373            }
8374        }
8375    }
8376    if (sourceStageMask != stageMask) {
8377        skip_call |=
8378            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8379                    DRAWSTATE_INVALID_EVENT, "DS",
8380                    "Submitting cmdbuffer with call to vkCmdWaitEvents using srcStageMask 0x%x which must be the bitwise OR of the "
8381                    "stageMask parameters used in calls to vkCmdSetEvent, and VK_PIPELINE_STAGE_HOST_BIT if the event was set with vkSetEvent.",
8382                    sourceStageMask);
8383    }
8384    return skip_call;
8385}
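// Illustrative rule with hypothetical handles: if a command buffer records
//
//     vkCmdSetEvent(cb, eventA, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdSetEvent(cb, eventB, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
//     vkCmdWaitEvents(cb, 2, events, srcStageMask, /* remaining args elided */ ...);
//
// then at submit time srcStageMask must equal exactly
// (VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT); any other value,
// superset or subset, triggers the DRAWSTATE_INVALID_EVENT error above.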
8386
8387VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8388vkCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8389                VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8390                uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8391                uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8392    bool skipCall = false;
8393    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8394    std::unique_lock<std::mutex> lock(global_lock);
8395    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8396    if (pCB) {
8397        auto firstEventIndex = pCB->events.size();
8398        for (uint32_t i = 0; i < eventCount; ++i) {
8399            pCB->waitedEvents.push_back(pEvents[i]);
8400            pCB->events.push_back(pEvents[i]);
8401        }
8402        std::function<bool(VkQueue)> eventUpdate =
8403            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
8404        pCB->eventUpdates.push_back(eventUpdate);
8405        if (pCB->state == CB_RECORDING) {
8406            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8407        } else {
8408            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8409        }
8410        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8411        skipCall |=
8412            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8413                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8414    }
8415    lock.unlock();
8416    if (!skipCall)
8417        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8418                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8419                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8420}
8421
8422VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8423vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8424                     VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8425                     uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8426                     uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8427    bool skipCall = false;
8428    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8429    std::unique_lock<std::mutex> lock(global_lock);
8430    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8431    if (pCB) {
8432        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8433        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8434        skipCall |=
8435            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8436                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8437    }
8438    lock.unlock();
8439    if (!skipCall)
8440        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8441                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8442                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8443}
8444
8445VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8446vkCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8447    bool skipCall = false;
8448    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8449    std::unique_lock<std::mutex> lock(global_lock);
8450    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8451    if (pCB) {
8452        QueryObject query = {queryPool, slot};
8453        pCB->activeQueries.insert(query);
8454        if (!pCB->startedQueries.count(query)) {
8455            pCB->startedQueries.insert(query);
8456        }
8457        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8458    }
8459    lock.unlock();
8460    if (!skipCall)
8461        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8462}
8463
8464VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8465    bool skipCall = false;
8466    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8467    std::unique_lock<std::mutex> lock(global_lock);
8468    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8469    if (pCB) {
8470        QueryObject query = {queryPool, slot};
8471        if (!pCB->activeQueries.count(query)) {
8472            skipCall |=
8473                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8474                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool %" PRIu64 ", index %d",
8475                        (uint64_t)(queryPool), slot);
8476        } else {
8477            pCB->activeQueries.erase(query);
8478        }
8479        pCB->queryToStateMap[query] = 1;
8480        if (pCB->state == CB_RECORDING) {
8481            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8482        } else {
8483            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8484        }
8485    }
8486    lock.unlock();
8487    if (!skipCall)
8488        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8489}
8490
8491VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8492vkCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8493    bool skipCall = false;
8494    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8495    std::unique_lock<std::mutex> lock(global_lock);
8496    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8497    if (pCB) {
8498        for (uint32_t i = 0; i < queryCount; i++) {
8499            QueryObject query = {queryPool, firstQuery + i};
8500            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8501            pCB->queryToStateMap[query] = 0;
8502        }
8503        if (pCB->state == CB_RECORDING) {
8504            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
8505        } else {
8506            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8507        }
8508        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8509    }
8510    lock.unlock();
8511    if (!skipCall)
8512        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8513}
8514
8515VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8516vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8517                          VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8518    bool skipCall = false;
8519    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8520    std::unique_lock<std::mutex> lock(global_lock);
8521    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8522#if MTMERGESOURCE
8523    VkDeviceMemory mem;
8524    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
8525    skipCall |=
8526        get_mem_binding_from_object(dev_data, (uint64_t)dstBuffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, &mem);
8527    if (cb_data != dev_data->commandBufferMap.end()) {
8528        std::function<bool()> function = [=]() {
8529            set_memory_valid(dev_data, mem, true);
8530            return false;
8531        };
8532        cb_data->second->validate_functions.push_back(function);
8533    }
8534    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8535    // Validate that DST buffer has correct usage flags set
8536    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8537                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8538#endif
8539    if (pCB) {
8540        for (uint32_t i = 0; i < queryCount; i++) {
8541            QueryObject query = {queryPool, firstQuery + i};
8542            if (!pCB->queryToStateMap[query]) {
8543                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8544                                    __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
8545                                    "Requesting a copy from query to buffer with invalid query: queryPool %" PRIu64 ", index %d",
8546                                    (uint64_t)(queryPool), firstQuery + i);
8547            }
8548        }
8549        if (pCB->state == CB_RECORDING) {
8550            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8551        } else {
8552            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8553        }
8554        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8555    }
8556    lock.unlock();
8557    if (!skipCall)
8558        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8559                                                                 dstOffset, stride, flags);
8560}
8561
8562VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8563                                                              VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8564                                                              const void *pValues) {
8565    bool skipCall = false;
8566    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8567    std::unique_lock<std::mutex> lock(global_lock);
8568    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8569    if (pCB) {
8570        if (pCB->state == CB_RECORDING) {
8571            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8572        } else {
8573            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8574        }
8575    }
8576    if ((offset + size) > dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize) {
8577        skipCall |= validatePushConstantSize(dev_data, offset, size, "vkCmdPushConstants()");
8578    }
8579    // TODO : Add warning if push constant update doesn't align with range
8580    lock.unlock();
8581    if (!skipCall)
8582        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8583}
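// For example, with a hypothetical maxPushConstantsSize of 128, an update with offset = 64 and
// size = 128 would reach byte 192 and be diagnosed via validatePushConstantSize(), while
// offset = 0, size = 128 fits exactly and passes.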
8584
8585VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
8586vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8587    bool skipCall = false;
8588    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8589    std::unique_lock<std::mutex> lock(global_lock);
8590    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8591    if (pCB) {
8592        QueryObject query = {queryPool, slot};
8593        pCB->queryToStateMap[query] = 1;
8594        if (pCB->state == CB_RECORDING) {
8595            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8596        } else {
8597            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8598        }
8599    }
8600    lock.unlock();
8601    if (!skipCall)
8602        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8603}
8604
8605VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8606                                                                   const VkAllocationCallbacks *pAllocator,
8607                                                                   VkFramebuffer *pFramebuffer) {
8608    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8609    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8610    if (VK_SUCCESS == result) {
8611        // Shadow create info and store in map
8612        std::lock_guard<std::mutex> lock(global_lock);
8613
8614        auto & fbNode = dev_data->frameBufferMap[*pFramebuffer];
8615        fbNode.createInfo = *pCreateInfo;
8616        if (pCreateInfo->pAttachments) {
8617            auto attachments = new VkImageView[pCreateInfo->attachmentCount];
8618            memcpy(attachments,
8619                   pCreateInfo->pAttachments,
8620                   pCreateInfo->attachmentCount * sizeof(VkImageView));
8621            fbNode.createInfo.pAttachments = attachments;
8622        }
8623        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8624            VkImageView view = pCreateInfo->pAttachments[i];
8625            auto view_data = dev_data->imageViewMap.find(view);
8626            if (view_data == dev_data->imageViewMap.end()) {
8627                continue;
8628            }
8629            MT_FB_ATTACHMENT_INFO fb_info;
8630            get_mem_binding_from_object(dev_data, (uint64_t)(view_data->second.image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8631                                        &fb_info.mem);
8632            fb_info.image = view_data->second.image;
8633            fbNode.attachments.push_back(fb_info);
8634        }
8635    }
8636    return result;
8637}
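// The deep copy of pAttachments above matters because the application owns the array passed
// through pCreateInfo and is free to release it as soon as vkCreateFramebuffer returns; the
// shadowed copy keeps the attachment list valid for the lifetime of the framebuffer node.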
8638
8639static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8640                           std::unordered_set<uint32_t> &processed_nodes) {
8641    // If we have already checked this node, we have not found a dependency path, so return false.
8642    if (processed_nodes.count(index))
8643        return false;
8644    processed_nodes.insert(index);
8645    const DAGNode &node = subpass_to_node[index];
8646    // Look for a dependency path. If one exists return true else recurse on the previous nodes.
8647    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8648        for (auto elem : node.prev) {
8649            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8650                return true;
8651        }
8652    } else {
8653        return true;
8654    }
8655    return false;
8656}
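// Illustrative traversal over a hypothetical subpass DAG with edges 0 -> 1 -> 2:
// FindDependency(2, 0, ...) does not find 0 among node 2's direct predecessors {1}, recurses into
// node 1, finds 0 there, and returns true. processed_nodes guarantees each subpass is visited at
// most once even when the graph fans out.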
8657
8658static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8659                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8660    bool result = true;
8661    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8662    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8663        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8664            continue;
8665        const DAGNode &node = subpass_to_node[subpass];
8666        // Check for a specified dependency between the two nodes. If one exists we are done.
8667        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8668        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8669        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
8670            // If no explicit dependency exists, an implicit one still might. If so, warn; if not, flag an error.
8671            std::unordered_set<uint32_t> processed_nodes;
8672            if (FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8673                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes)) {
8674                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8675                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8676                                     "A dependency between subpasses %d and %d must exist but only an implicit one is specified.",
8677                                     subpass, dependent_subpasses[k]);
8678            } else {
8679                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8680                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8681                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8682                                     dependent_subpasses[k]);
8683                result = false;
8684            }
8685        }
8686    }
8687    return result;
8688}
8689
8690static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8691                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8692    const DAGNode &node = subpass_to_node[index];
8693    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
8694    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8695    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8696        if (attachment == subpass.pColorAttachments[j].attachment)
8697            return true;
8698    }
8699    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8700        if (attachment == subpass.pDepthStencilAttachment->attachment)
8701            return true;
8702    }
8703    bool result = false;
8704    // Loop through previous nodes and see if any of them write to the attachment.
8705    for (auto elem : node.prev) {
8706        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8707    }
8708    // If the attachment was written to by a previous node, then this node needs to preserve it.
8709    if (result && depth > 0) {
8710        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8711        bool has_preserved = false;
8712        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8713            if (subpass.pPreserveAttachments[j] == attachment) {
8714                has_preserved = true;
8715                break;
8716            }
8717        }
8718        if (!has_preserved) {
8719            skip_call |=
8720                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8721                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8722                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8723        }
8724    }
8725    return result;
8726}
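// Illustrative case for a hypothetical render pass: subpass 0 writes color attachment 2, subpass 2
// reads it as an input attachment, and subpass 1 sits between them in the DAG without using it.
// The recursion reaches subpass 0 (the writer) at depth 2 and returns true; unwinding to subpass 1
// at depth 1, attachment 2 must appear in that subpass's pPreserveAttachments or an error is logged.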
8727
8728// Return true iff the half-open ranges [offset1, offset1 + size1) and [offset2, offset2 + size2) intersect
8729template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
8730    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
8731}
8732
8733bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8734    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8735            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8736}
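// Illustrative results of the interval test above (hypothetical values):
//
//     isRangeOverlapping(0u, 4u, 2u, 4u) -> true   ([0,4) and [2,6) intersect)
//     isRangeOverlapping(0u, 2u, 2u, 2u) -> false  ([0,2) and [2,4) only touch)
//     isRangeOverlapping(1u, 3u, 1u, 3u) -> true   (identical ranges alias)
//
// Two subresource regions overlap only when both their mip-level ranges and their array-layer
// ranges overlap.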
8737
8738static bool ValidateDependencies(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin,
8739                                 const std::vector<DAGNode> &subpass_to_node) {
8740    bool skip_call = false;
8741    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
8742    const VkRenderPassCreateInfo *pCreateInfo = my_data->renderPassMap.at(pRenderPassBegin->renderPass)->pCreateInfo;
8743    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8744    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8745    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8746    // Find overlapping attachments
8747    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8748        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8749            VkImageView viewi = pFramebufferInfo->pAttachments[i];
8750            VkImageView viewj = pFramebufferInfo->pAttachments[j];
8751            if (viewi == viewj) {
8752                overlapping_attachments[i].push_back(j);
8753                overlapping_attachments[j].push_back(i);
8754                continue;
8755            }
8756            auto view_data_i = my_data->imageViewMap.find(viewi);
8757            auto view_data_j = my_data->imageViewMap.find(viewj);
8758            if (view_data_i == my_data->imageViewMap.end() || view_data_j == my_data->imageViewMap.end()) {
8759                continue;
8760            }
8761            if (view_data_i->second.image == view_data_j->second.image &&
8762                isRegionOverlapping(view_data_i->second.subresourceRange, view_data_j->second.subresourceRange)) {
8763                overlapping_attachments[i].push_back(j);
8764                overlapping_attachments[j].push_back(i);
8765                continue;
8766            }
8767            auto image_data_i = my_data->imageMap.find(view_data_i->second.image);
8768            auto image_data_j = my_data->imageMap.find(view_data_j->second.image);
8769            if (image_data_i == my_data->imageMap.end() || image_data_j == my_data->imageMap.end()) {
8770                continue;
8771            }
8772            if (image_data_i->second.mem == image_data_j->second.mem &&
8773                isRangeOverlapping(image_data_i->second.memOffset, image_data_i->second.memSize, image_data_j->second.memOffset,
8774                                   image_data_j->second.memSize)) {
8775                overlapping_attachments[i].push_back(j);
8776                overlapping_attachments[j].push_back(i);
8777            }
8778        }
8779    }
8780    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8781        uint32_t attachment = i;
8782        for (auto other_attachment : overlapping_attachments[i]) {
8783            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8784                skip_call |=
8785                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8786                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8787                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8788                            attachment, other_attachment);
8789            }
8790            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8791                skip_call |=
8792                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8793                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8794                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8795                            other_attachment, attachment);
8796            }
8797        }
8798    }
8799    // Find for each attachment the subpasses that use them.
8800    unordered_set<uint32_t> attachmentIndices;
8801    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8802        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8803        attachmentIndices.clear();
8804        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8805            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8806            input_attachment_to_subpass[attachment].push_back(i);
8807            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8808                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8809            }
8810        }
8811        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8812            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8813            output_attachment_to_subpass[attachment].push_back(i);
8814            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8815                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8816            }
8817            attachmentIndices.insert(attachment);
8818        }
8819        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8820            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8821            output_attachment_to_subpass[attachment].push_back(i);
8822            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8823                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8824            }
8825
8826            if (attachmentIndices.count(attachment)) {
8827                skip_call |=
8828                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8829                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8830                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8831                            attachment, i);
8832            }
8833        }
8834    }
8835    // If a dependency is needed, make sure one exists
8836    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8837        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8838        // If the attachment is an input then all subpasses that output must have a dependency relationship
8839        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8840            const uint32_t &attachment = subpass.pInputAttachments[j].attachment;
8841            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8842        }
8843        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8844        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8845            const uint32_t &attachment = subpass.pColorAttachments[j].attachment;
8846            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8847            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8848        }
8849        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8850            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8851            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8852            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8853        }
8854    }
8855    // Loop through implicit dependencies: if this pass reads an attachment, make sure the attachment is preserved
8856    // by all passes between the one that wrote it and this read.
8857    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8858        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8859        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8860            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
8861        }
8862    }
8863    return skip_call;
8864}
8865
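// Check the attachment layouts each subpass declares: input, color, and depth/stencil attachments each
// have expected *_OPTIMAL layouts, VK_IMAGE_LAYOUT_GENERAL only draws a performance warning, and any
// other layout is reported as an error.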
8866static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
8867    bool skip = false;
8868
8869    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8870        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8871        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8872            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
8873                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
8874                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8875                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8876                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8877                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8878                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
8879                } else {
8880                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8881                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8882                                    "Layout for input attachment is %s but can only be DEPTH_STENCIL_READ_ONLY_OPTIMAL, SHADER_READ_ONLY_OPTIMAL, or GENERAL.",
8883                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
8884                }
8885            }
8886        }
8887        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8888            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
8889                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8890                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8891                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8892                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8893                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
8894                } else {
8895                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8896                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8897                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
8898                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
8899                }
8900            }
8901        }
8902        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8903            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
8904                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
8905                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8906                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8907                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8908                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8909                } else {
8910                    skip |=
8911                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8912                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8913                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8914                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8915                }
8916            }
8917        }
8918    }
8919    return skip;
8920}
8921
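// Build a DAG over the subpasses from pCreateInfo->pDependencies: each VkSubpassDependency records a
// prev entry on dstSubpass and a next entry on srcSubpass (only for endpoints that are real subpasses,
// not VK_SUBPASS_EXTERNAL), and srcSubpass == dstSubpass marks a self-dependency. For example, a
// dependency {srcSubpass = 0, dstSubpass = 1} yields subpass_to_node[1].prev == {0} and
// subpass_to_node[0].next == {1}.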
8922static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8923                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8924    bool skip_call = false;
8925    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8926        DAGNode &subpass_node = subpass_to_node[i];
8927        subpass_node.pass = i;
8928    }
8929    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8930        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8931        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8932            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8933            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8934                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
8935                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
8936        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8937            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8938                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dst subpasses cannot both be VK_SUBPASS_EXTERNAL.");
8939        } else if (dependency.srcSubpass == dependency.dstSubpass) {
8940            has_self_dependency[dependency.srcSubpass] = true;
8941        }
8942        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8943            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8944        }
8945        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8946            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8947        }
8948    }
8949    return skip_call;
8950}
8951
8952
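// Run the incoming SPIR-V through the SPIRV-Tools validator before creating the module, then shadow the
// create info in shaderModuleMap so pipeline-creation checks can inspect the bytecode later.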
8953VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8954                                                                    const VkAllocationCallbacks *pAllocator,
8955                                                                    VkShaderModule *pShaderModule) {
8956    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8957    bool skip_call = false;
8958
8959    /* Use the SPIRV-Tools validator to try to catch any issues with the module itself */
8960    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
8961    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
8962    spv_diagnostic diag = nullptr;
8963
8964    auto result = spvValidate(ctx, &binary, &diag);
8965    if (result != SPV_SUCCESS) {
8966        skip_call |= log_msg(my_data->report_data,
8967                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
8968                             VkDebugReportObjectTypeEXT(0), 0,
8969                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
8970                             diag && diag->error ? diag->error : "(no error text)");
8971    }
8972
8973    spvDiagnosticDestroy(diag);
8974    spvContextDestroy(ctx);
8975
8976    if (skip_call)
8977        return VK_ERROR_VALIDATION_FAILED_EXT;
8978
8979    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8980
8981    if (res == VK_SUCCESS) {
8982        std::lock_guard<std::mutex> lock(global_lock);
8983        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
8984    }
8985    return res;
8986}
8987
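// Shadow the render pass create info so validation can refer to it after the call returns. All attachment
// references for a subpass are packed into a single allocation laid out as
//   [ input refs | color refs | resolve refs | depth/stencil ref | preserve indices ]
// which is why deleteRenderPasses() only frees the first non-null attachment pointer per subpass.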
8988VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8989                                                                  const VkAllocationCallbacks *pAllocator,
8990                                                                  VkRenderPass *pRenderPass) {
8991    bool skip_call = false;
8992    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8993    std::unique_lock<std::mutex> lock(global_lock);
8994    // Create DAG
8995    std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8996    std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8997    skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
8998    // Validate
8999    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9000    if (skip_call) {
9001        lock.unlock();
9002        return VK_ERROR_VALIDATION_FAILED_EXT;
9003    }
9004    lock.unlock();
9005    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9006    if (VK_SUCCESS == result) {
9007        lock.lock();
9008        // TODOSC : Merge in tracking of renderpass from shader_checker
9009        // Shadow create info and store in map
9010        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9011        if (pCreateInfo->pAttachments) {
9012            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9013            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9014                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9015        }
9016        if (pCreateInfo->pSubpasses) {
9017            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9018            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9019
9020            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9021                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9022                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9023                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9024                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9025                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9026
9027                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9028                subpass->pInputAttachments = attachments;
9029                attachments += subpass->inputAttachmentCount;
9030
9031                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9032                subpass->pColorAttachments = attachments;
9033                attachments += subpass->colorAttachmentCount;
9034
9035                if (subpass->pResolveAttachments) {
9036                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9037                    subpass->pResolveAttachments = attachments;
9038                    attachments += subpass->colorAttachmentCount;
9039                }
9040
9041                if (subpass->pDepthStencilAttachment) {
9042                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9043                    subpass->pDepthStencilAttachment = attachments;
9044                    attachments += 1;
9045                }
9046
9047                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
9048                subpass->pPreserveAttachments = reinterpret_cast<const uint32_t *>(attachments); // preserve attachments are bare uint32_t indices, not VkAttachmentReference
9049            }
9050        }
9051        if (pCreateInfo->pDependencies) {
9052            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9053            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9054                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9055        }
9056        dev_data->renderPassMap[*pRenderPass] = new RENDER_PASS_NODE(localRPCI);
9057        dev_data->renderPassMap[*pRenderPass]->hasSelfDependency = has_self_dependency;
9058        dev_data->renderPassMap[*pRenderPass]->subpassToNode = subpass_to_node;
9059#if MTMERGESOURCE
9060        // MTMTODO : Merge with code from above to eliminate duplication
9061        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9062            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9063            MT_PASS_ATTACHMENT_INFO pass_info;
9064            pass_info.load_op = desc.loadOp;
9065            pass_info.store_op = desc.storeOp;
9066            pass_info.attachment = i;
9067            dev_data->renderPassMap[*pRenderPass]->attachments.push_back(pass_info);
9068        }
9069        // TODO: Maybe fill list and then copy instead of locking
9070        std::unordered_map<uint32_t, bool> &attachment_first_read = dev_data->renderPassMap[*pRenderPass]->attachment_first_read;
9071        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout =
9072            dev_data->renderPassMap[*pRenderPass]->attachment_first_layout;
9073        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9074            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9075            if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9076                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9077                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9078                                     "Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9079            }
9080            for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9081                uint32_t attachment = subpass.pPreserveAttachments[j];
9082                if (attachment >= pCreateInfo->attachmentCount) {
9083                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9084                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9085                                         "Preserve attachment %d must be less than the total number of attachments %d.",
9086                                         attachment, pCreateInfo->attachmentCount);
9087                }
9088            }
9089            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9090                uint32_t attachment;
9091                if (subpass.pResolveAttachments) {
9092                    attachment = subpass.pResolveAttachments[j].attachment;
9093                    if (attachment >= pCreateInfo->attachmentCount && attachment != VK_ATTACHMENT_UNUSED) {
9094                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9095                                             __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9096                                             "Resolve attachment %d must be less than the total number of attachments %d.",
9097                                             attachment, pCreateInfo->attachmentCount);
9098                        continue;
9099                    }
9100                }
9101                attachment = subpass.pColorAttachments[j].attachment;
9102                if (attachment >= pCreateInfo->attachmentCount) {
9103                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9104                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9105                                         "Color attachment %d must be less than the total number of attachments %d.",
9106                                         attachment, pCreateInfo->attachmentCount);
9107                    continue;
9108                }
9109                if (attachment_first_read.count(attachment))
9110                    continue;
9111                attachment_first_read.insert(std::make_pair(attachment, false));
9112                attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9113            }
9114            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9115                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9116                if (attachment >= pCreateInfo->attachmentCount) {
9117                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9118                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9119                                         "Depth stencil attachment %d must be less than the total number of attachments %d.",
9120                                         attachment, pCreateInfo->attachmentCount);
9121                    continue;
9122                }
9123                if (attachment_first_read.count(attachment))
9124                    continue;
9125                attachment_first_read.insert(std::make_pair(attachment, false));
9126                attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9127            }
9128            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9129                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9130                if (attachment >= pCreateInfo->attachmentCount) {
9131                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9132                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9133                                         "Input attachment %d must be less than the total number of attachments %d.",
9134                                         attachment, pCreateInfo->attachmentCount);
9135                    continue;
9136                }
9137                if (attachment_first_read.count(attachment))
9138                    continue;
9139                attachment_first_read.insert(std::make_pair(attachment, true));
9140                attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9141            }
9142        }
9143#endif
9144        lock.unlock();
9145    }
9146    return result;
9147}
9148// Free the renderpass shadow
9149static void deleteRenderPasses(layer_data *my_data) {
9150    if (my_data->renderPassMap.empty())
9151        return;
9152    for (auto ii = my_data->renderPassMap.begin(); ii != my_data->renderPassMap.end(); ++ii) {
9153        const VkRenderPassCreateInfo *pRenderPassInfo = (*ii).second->pCreateInfo;
9154        delete[] pRenderPassInfo->pAttachments;
9155        if (pRenderPassInfo->pSubpasses) {
9156            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9157                // Attachment arrays are all allocated in one block, so we just need to
9158                // find the first non-null pointer and delete that
9159                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9160                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9161                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9162                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9163                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9164                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9165                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9166                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9167                }
9168            }
9169            delete[] pRenderPassInfo->pSubpasses;
9170        }
9171        delete[] pRenderPassInfo->pDependencies;
9172        delete pRenderPassInfo;
9173        delete (*ii).second;
9174    }
9175    my_data->renderPassMap.clear();
9176}
9177
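// Check, for every mip level and array layer of each framebuffer attachment, that the layout the command
// buffer last recorded for that subresource matches the render pass's declared initialLayout; unseen
// subresources are seeded with the initial layout instead.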
9178static bool VerifyFramebufferAndRenderPassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9179    bool skip_call = false;
9180    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9181    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9182    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9183    const VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer].createInfo;
9184    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9185        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9186                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9187                                                                 "whose attachment count differs from the render pass's.");
9188    }
9189    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9190        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9191        auto image_data = dev_data->imageViewMap.find(image_view);
9192        assert(image_data != dev_data->imageViewMap.end());
9193        const VkImage &image = image_data->second.image;
9194        const VkImageSubresourceRange &subRange = image_data->second.subresourceRange;
9195        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9196                                             pRenderPassInfo->pAttachments[i].initialLayout};
9197        // TODO: Do not iterate over every possibility - consolidate where possible
9198        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9199            uint32_t level = subRange.baseMipLevel + j;
9200            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9201                uint32_t layer = subRange.baseArrayLayer + k;
9202                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9203                IMAGE_CMD_BUF_LAYOUT_NODE node;
9204                if (!FindLayout(pCB, image, sub, node)) {
9205                    SetLayout(pCB, image, sub, newNode);
9206                    continue;
9207                }
9208                if (newNode.layout != node.layout) {
9209                    skip_call |=
9210                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9211                                DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using attachment %i "
9212                                                                    "where the "
9213                                                                    "initial layout is %s and the layout of the attachment at the "
9214                                                                    "start of the render pass is %s. The layouts must match.",
9215                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9216                }
9217            }
9218        }
9219    }
9220    return skip_call;
9221}
9222
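// Record the layout transitions implied by entering a subpass: each input, color, and depth/stencil
// attachment referenced by the subpass is moved to the layout named in its VkAttachmentReference.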
9223static void TransitionSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
9224                                     const int subpass_index) {
9225    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9226    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9227    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9228    if (render_pass_data == dev_data->renderPassMap.end()) {
9229        return;
9230    }
9231    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9232    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9233    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9234        return;
9235    }
9236    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9237    const VkSubpassDescription &subpass = pRenderPassInfo->pSubpasses[subpass_index];
9238    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9239        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pInputAttachments[j].attachment];
9240        SetLayout(dev_data, pCB, image_view, subpass.pInputAttachments[j].layout);
9241    }
9242    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9243        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pColorAttachments[j].attachment];
9244        SetLayout(dev_data, pCB, image_view, subpass.pColorAttachments[j].layout);
9245    }
9246    if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
9247        const VkImageView &image_view = framebufferInfo.pAttachments[subpass.pDepthStencilAttachment->attachment];
9248        SetLayout(dev_data, pCB, image_view, subpass.pDepthStencilAttachment->layout);
9249    }
9250}
9251
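// Many vkCmd* entry points below are only legal on primary command buffers; flag the call if this
// command buffer was allocated at the secondary level.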
9252static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9253    bool skip_call = false;
9254    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9255        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9256                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9257                             cmd_name.c_str());
9258    }
9259    return skip_call;
9260}
9261
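// Record the end-of-render-pass transitions: every framebuffer attachment moves to the finalLayout
// declared for it in the render pass create info.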
9262static void TransitionFinalSubpassLayouts(VkCommandBuffer cmdBuffer, const VkRenderPassBeginInfo *pRenderPassBegin) {
9263    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
9264    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
9265    auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9266    if (render_pass_data == dev_data->renderPassMap.end()) {
9267        return;
9268    }
9269    const VkRenderPassCreateInfo *pRenderPassInfo = render_pass_data->second->pCreateInfo;
9270    auto framebuffer_data = dev_data->frameBufferMap.find(pRenderPassBegin->framebuffer);
9271    if (framebuffer_data == dev_data->frameBufferMap.end()) {
9272        return;
9273    }
9274    const VkFramebufferCreateInfo framebufferInfo = framebuffer_data->second.createInfo;
9275    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9276        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9277        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9278    }
9279}
9280
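// renderArea must lie entirely within the framebuffer; a negative offset or an offset + extent larger
// than the framebuffer's width/height is reported as DRAWSTATE_INVALID_RENDER_AREA.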
9281static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9282    bool skip_call = false;
9283    const VkFramebufferCreateInfo *pFramebufferInfo = &my_data->frameBufferMap.at(pRenderPassBegin->framebuffer).createInfo;
9284    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9285        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9286        pRenderPassBegin->renderArea.offset.y < 0 ||
9287        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9288        skip_call |= static_cast<bool>(log_msg(
9289            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9290            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9291            "Cannot execute a render pass with renderArea not within the bounds of the "
9292            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9293            "height %d.",
9294            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9295            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9296    }
9297    return skip_call;
9298}
9299
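// vkCmdBeginRenderPass: queue per-attachment memory-validity callbacks keyed off each attachment's
// loadOp, verify the render area and initial layouts, re-check subpass dependencies, then record the
// active render pass/subpass state on the command buffer before calling down the dispatch chain.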
9300VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9301vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9302    bool skipCall = false;
9303    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9304    std::unique_lock<std::mutex> lock(global_lock);
9305    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9306    if (pCB) {
9307        if (pRenderPassBegin && pRenderPassBegin->renderPass) {
9308#if MTMERGESOURCE
9309            auto pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9310            if (pass_data != dev_data->renderPassMap.end()) {
9311                RENDER_PASS_NODE* pRPNode = pass_data->second;
9312                pRPNode->fb = pRenderPassBegin->framebuffer;
9313                auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9314                for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9315                    MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9316                    if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9317                        if (cb_data != dev_data->commandBufferMap.end()) {
9318                            std::function<bool()> function = [=]() {
9319                                set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9320                                return false;
9321                            };
9322                            cb_data->second->validate_functions.push_back(function);
9323                        }
9324                        VkImageLayout &attachment_layout = pRPNode->attachment_first_layout[pRPNode->attachments[i].attachment];
9325                        if (attachment_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL ||
9326                            attachment_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
9327                            skipCall |=
9328                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9329                                        VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, (uint64_t)(pRenderPassBegin->renderPass), __LINE__,
9330                                        MEMTRACK_INVALID_LAYOUT, "MEM", "Cannot clear attachment %d with invalid first layout %d.",
9331                                        pRPNode->attachments[i].attachment, attachment_layout);
9332                        }
9333                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE) {
9334                        if (cb_data != dev_data->commandBufferMap.end()) {
9335                            std::function<bool()> function = [=]() {
9336                                set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9337                                return false;
9338                            };
9339                            cb_data->second->validate_functions.push_back(function);
9340                        }
9341                    } else if (pRPNode->attachments[i].load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
9342                        if (cb_data != dev_data->commandBufferMap.end()) {
9343                            std::function<bool()> function = [=]() {
9344                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9345                            };
9346                            cb_data->second->validate_functions.push_back(function);
9347                        }
9348                    }
9349                    if (pRPNode->attachment_first_read[pRPNode->attachments[i].attachment]) {
9350                        if (cb_data != dev_data->commandBufferMap.end()) {
9351                            std::function<bool()> function = [=]() {
9352                                return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9353                            };
9354                            cb_data->second->validate_functions.push_back(function);
9355                        }
9356                    }
9357                }
9358            }
9359#endif
9360            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9361            skipCall |= VerifyFramebufferAndRenderPassLayouts(commandBuffer, pRenderPassBegin);
9362            auto render_pass_data = dev_data->renderPassMap.find(pRenderPassBegin->renderPass);
9363            if (render_pass_data != dev_data->renderPassMap.end()) {
9364                skipCall |= ValidateDependencies(dev_data, pRenderPassBegin, render_pass_data->second->subpassToNode);
9365            }
9366            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9367            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9368            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9369            pCB->activeRenderPass = pRenderPassBegin->renderPass;
9370            // This is a shallow copy as that is all that is needed for now
9371            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9372            pCB->activeSubpass = 0;
9373            pCB->activeSubpassContents = contents;
9374            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
9375            // Connect this framebuffer to this cmdBuffer
9376            dev_data->frameBufferMap[pRenderPassBegin->framebuffer].referencingCmdBuffers.insert(pCB->commandBuffer);
9377        } else {
9378            skipCall |=
9379                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9380                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9381        }
9382    }
9383    lock.unlock();
9384    if (!skipCall) {
9385        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9386    }
9387}
9388
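// Advance to the next subpass: record the implied attachment layout transitions and re-validate any
// bound graphics pipeline against the new subpass state.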
9389VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9390    bool skipCall = false;
9391    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9392    std::unique_lock<std::mutex> lock(global_lock);
9393    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9394    if (pCB) {
9395        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9396        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9397        pCB->activeSubpass++;
9398        pCB->activeSubpassContents = contents;
9399        TransitionSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9400        if (pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline) {
9401            skipCall |= validatePipelineState(dev_data, pCB, VK_PIPELINE_BIND_POINT_GRAPHICS,
9402                                              pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
9403        }
9404        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9405    }
9406    lock.unlock();
9407    if (!skipCall)
9408        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9409}
9410
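// vkCmdEndRenderPass: queue memory-validity updates keyed off each attachment's storeOp, record the
// final layout transitions, and clear the active render pass state on the command buffer.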
9411VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
9412    bool skipCall = false;
9413    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9414    std::unique_lock<std::mutex> lock(global_lock);
9415#if MTMERGESOURCE
9416    auto cb_data = dev_data->commandBufferMap.find(commandBuffer);
9417    if (cb_data != dev_data->commandBufferMap.end()) {
9418        auto pass_data = dev_data->renderPassMap.find(cb_data->second->activeRenderPass);
9419        if (pass_data != dev_data->renderPassMap.end()) {
9420            RENDER_PASS_NODE* pRPNode = pass_data->second;
9421            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9422                MT_FB_ATTACHMENT_INFO &fb_info = dev_data->frameBufferMap[pRPNode->fb].attachments[i];
9423                if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_STORE) {
9424                    if (cb_data != dev_data->commandBufferMap.end()) {
9425                        std::function<bool()> function = [=]() {
9426                            set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9427                            return false;
9428                        };
9429                        cb_data->second->validate_functions.push_back(function);
9430                    }
9431                } else if (pRPNode->attachments[i].store_op == VK_ATTACHMENT_STORE_OP_DONT_CARE) {
9432                    if (cb_data != dev_data->commandBufferMap.end()) {
9433                        std::function<bool()> function = [=]() {
9434                            set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9435                            return false;
9436                        };
9437                        cb_data->second->validate_functions.push_back(function);
9438                    }
9439                }
9440            }
9441        }
9442    }
9443#endif
9444    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9445    if (pCB) {
9446        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9447        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9448        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9449        TransitionFinalSubpassLayouts(commandBuffer, &pCB->activeRenderPassBeginInfo);
9450        pCB->activeRenderPass = 0;
9451        pCB->activeSubpass = 0;
9452    }
9453    lock.unlock();
9454    if (!skipCall)
9455        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9456}
9457
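// Shared error report for attachment incompatibilities between a secondary command buffer's render pass
// and the render pass currently active on the primary.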
9458static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9459                                        VkRenderPass primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9460                                        const char *msg) {
9461    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9462                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9463                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9464                   " that is not compatible with the current render pass %" PRIx64 ". "
9465                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9466                   (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass), primaryAttach, secondaryAttach,
9467                   msg);
9468}
9469
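// Two attachment references are compatible if both resolve to VK_ATTACHMENT_UNUSED, or if their
// descriptions agree on format and sample count (and, when the passes have more than one subpass,
// on flags).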
9470static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9471                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass,
9472                                            uint32_t secondaryAttach, bool is_multi) {
9473    bool skip_call = false;
9474    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9475    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9476    if (primary_data->second->pCreateInfo->attachmentCount <= primaryAttach) {
9477        primaryAttach = VK_ATTACHMENT_UNUSED;
9478    }
9479    if (secondary_data->second->pCreateInfo->attachmentCount <= secondaryAttach) {
9480        secondaryAttach = VK_ATTACHMENT_UNUSED;
9481    }
9482    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9483        return skip_call;
9484    }
9485    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9486        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9487                                                 secondaryAttach, "The first is unused while the second is not.");
9488        return skip_call;
9489    }
9490    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9491        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9492                                                 secondaryAttach, "The second is unused while the first is not.");
9493        return skip_call;
9494    }
9495    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].format !=
9496        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].format) {
9497        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9498                                                 secondaryAttach, "They have different formats.");
9499    }
9500    if (primary_data->second->pCreateInfo->pAttachments[primaryAttach].samples !=
9501        secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].samples) {
9502        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9503                                                 secondaryAttach, "They have different samples.");
9504    }
9505    if (is_multi &&
9506        primary_data->second->pCreateInfo->pAttachments[primaryAttach].flags !=
9507            secondary_data->second->pCreateInfo->pAttachments[secondaryAttach].flags) {
9508        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9509                                                 secondaryAttach, "They have different flags.");
9510    }
9511    return skip_call;
9512}
9513
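// Compare one subpass across the two render passes: walk the input, color, resolve, and depth/stencil
// attachment references in parallel, treating indices past the end of the shorter array as
// VK_ATTACHMENT_UNUSED.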
9514static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9515                                         VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass, const int subpass,
9516                                         bool is_multi) {
9517    bool skip_call = false;
9518    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9519    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9520    const VkSubpassDescription &primary_desc = primary_data->second->pCreateInfo->pSubpasses[subpass];
9521    const VkSubpassDescription &secondary_desc = secondary_data->second->pCreateInfo->pSubpasses[subpass];
9522    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9523    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9524        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9525        if (i < primary_desc.inputAttachmentCount) {
9526            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9527        }
9528        if (i < secondary_desc.inputAttachmentCount) {
9529            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9530        }
9531        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9532                                                     secondaryPass, secondary_input_attach, is_multi);
9533    }
9534    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9535    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9536        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9537        if (i < primary_desc.colorAttachmentCount) {
9538            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9539        }
9540        if (i < secondary_desc.colorAttachmentCount) {
9541            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9542        }
9543        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9544                                                     secondaryPass, secondary_color_attach, is_multi);
9545        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9546        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9547            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9548        }
9549        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9550            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9551        }
9552        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9553                                                     secondaryPass, secondary_resolve_attach, is_multi);
9554    }
9555    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9556    if (primary_desc.pDepthStencilAttachment) {
9557        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9558    }
9559    if (secondary_desc.pDepthStencilAttachment) {
9560        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9561    }
9562    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9563                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9564    return skip_call;
9565}
9566
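// Render passes are compatible for vkCmdExecuteCommands when both are known, have the same subpass
// count, and every corresponding attachment reference in every subpass is compatible; identical handles
// short-circuit as trivially compatible.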
9567static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9568                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9569    bool skip_call = false;
9570    // Early exit if renderPass objects are identical (and therefore compatible)
9571    if (primaryPass == secondaryPass)
9572        return skip_call;
9573    auto primary_data = dev_data->renderPassMap.find(primaryPass);
9574    auto secondary_data = dev_data->renderPassMap.find(secondaryPass);
9575    if (primary_data == dev_data->renderPassMap.end() || primary_data->second == nullptr) {
9576        skip_call |=
9577            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9578                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9579                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9580                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9581        return skip_call;
9582    }
9583    if (secondary_data == dev_data->renderPassMap.end() || secondary_data->second == nullptr) {
9584        skip_call |=
9585            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9586                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9587                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer %p which has invalid render pass %" PRIx64 ".",
9588                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9589        return skip_call;
9590    }
9591    if (primary_data->second->pCreateInfo->subpassCount != secondary_data->second->pCreateInfo->subpassCount) {
9592        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9593                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9594                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a render pass %" PRIx64
9595                             " that is not compatible with the current render pass %" PRIx64 ". "
9596                             "They have a different number of subpasses.",
9597                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9598        return skip_call;
9599    }
9600    bool is_multi = primary_data->second->pCreateInfo->subpassCount > 1;
9601    for (uint32_t i = 0; i < primary_data->second->pCreateInfo->subpassCount; ++i) {
9602        skip_call |=
9603            validateSubpassCompatibility(dev_data, primaryBuffer, primaryPass, secondaryBuffer, secondaryPass, i, is_multi);
9604    }
9605    return skip_call;
9606}
9607
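// If the secondary command buffer names a framebuffer in its inheritance info, it must be the same
// framebuffer the primary is currently rendering into, and that framebuffer's render pass must be
// compatible with the inherited one.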
9608static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9609                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9610    bool skip_call = false;
9611    if (!pSubCB->beginInfo.pInheritanceInfo) {
9612        return skip_call;
9613    }
9614    VkFramebuffer primary_fb = dev_data->renderPassMap[pCB->activeRenderPass]->fb;
9615    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9616    if (secondary_fb != VK_NULL_HANDLE) {
9617        if (primary_fb != secondary_fb) {
9618            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9619                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9620                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p which has a framebuffer %" PRIx64
9621                                 " that is not compatible with the current framebuffer %" PRIx64 ".",
9622                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9623        }
9624        auto fb_data = dev_data->frameBufferMap.find(secondary_fb);
9625        if (fb_data == dev_data->frameBufferMap.end()) {
9626            skip_call |=
9627                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9628                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9629                                                                          "which has invalid framebuffer %" PRIx64 ".",
9630                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9631            return skip_call;
9632        }
9633        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb_data->second.createInfo.renderPass,
9634                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9635    }
9636    return skip_call;
9637}
9638
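// Check the secondary against the primary's active queries: an inherited pipelineStatistics mask may not
// request statistics the active query pool was not created with, and the secondary may not have started
// a query of a type that is already active on the primary.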
9639static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9640    bool skipCall = false;
9641    unordered_set<int> activeTypes;
9642    for (auto queryObject : pCB->activeQueries) {
9643        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9644        if (queryPoolData != dev_data->queryPoolMap.end()) {
9645            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9646                pSubCB->beginInfo.pInheritanceInfo) {
9647                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9648                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9649                    skipCall |= log_msg(
9650                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9651                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9652                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9653                        "which has invalid active query pool %" PRIx64 ". Pipeline statistics is being queried so the command "
9654                        "buffer must have all bits set on the queryPool.",
9655                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9656                }
9657            }
9658            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9659        }
9660    }
9661    for (auto queryObject : pSubCB->startedQueries) {
9662        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9663        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9664            skipCall |=
9665                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9666                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9667                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p "
9668                        "which has invalid active query pool %" PRIx64 " of type %d but a query of that type has been started on "
9669                        "secondary Cmd Buffer %p.",
9670                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9671                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9672        }
9673    }
9674    return skipCall;
9675}
9676
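// Intercepts vkCmdExecuteCommands. Every element of pCommandBuffers must be a known
// secondary command buffer; when executed inside a render pass, each one must have been
// begun with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and with a compatible
// renderPass (and, if specified, the matching framebuffer) in its inheritance info.
// A rough application-side sketch of a well-formed secondary recording (the names
// renderPass/framebuffer/secondary below are placeholders, not objects tracked here):
//
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = renderPass;   // must be compatible with the active render pass
//     inherit.framebuffer = framebuffer; // optional, but must match the active FB if set
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary, &begin);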
9677VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
9678vkCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9679    bool skipCall = false;
9680    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9681    std::unique_lock<std::mutex> lock(global_lock);
9682    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9683    if (pCB) {
9684        GLOBAL_CB_NODE *pSubCB = NULL;
9685        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9686            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9687            if (!pSubCB) {
9688                skipCall |=
9689                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9690                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9691                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer %p in element %u of pCommandBuffers array.",
9692                            (void *)pCommandBuffers[i], i);
9693            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9694                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9695                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9696                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer %p in element %u of pCommandBuffers "
9697                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9698                                    (void *)pCommandBuffers[i], i);
9699            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
9700                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9701                    skipCall |= log_msg(
9702                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9703                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9704                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) executed within render pass (%#" PRIxLEAST64
9705                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9706                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass);
9707                } else {
9708                    // If the continue bit is set, make sure the secondary's render pass is compatible with the parent command buffer's pass
9709                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass, pCommandBuffers[i],
9710                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9711                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9712                }
9713                string errorString = "";
9714                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass,
9715                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9716                    skipCall |= log_msg(
9717                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9718                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9719                        "vkCmdExecuteCommands(): Secondary Command Buffer (%p) w/ render pass (%#" PRIxLEAST64
9720                        ") is incompatible w/ primary command buffer (%p) w/ render pass (%#" PRIxLEAST64 ") due to: %s",
9721                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9722                        (uint64_t)pCB->activeRenderPass, errorString.c_str());
9723                }
9724                //  If the secondary CB specifies a framebuffer, it must match the FB from the vkCmdBeginRenderPass()
9725                //   that this CB will be executed in, AND that framebuffer must have been created w/ an RP compatible w/ the renderpass
9726                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9727                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9728                        skipCall |= log_msg(
9729                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9730                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9731                            "vkCmdExecuteCommands(): Secondary Command Buffer (%p) references framebuffer (%#" PRIxLEAST64
9732                            ") that does not match framebuffer (%#" PRIxLEAST64 ") in active renderpass (%#" PRIxLEAST64 ").",
9733                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9734                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass);
9735                    }
9736                }
9737            }
9738            // TODO(mlentine): Move more logic into this method
9739            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9740            skipCall |= validateCommandBufferState(dev_data, pSubCB);
9741            // Secondary cmdBuffers are considered pending execution from the time
9742            // they are recorded
9743            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9744                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9745                    skipCall |= log_msg(
9746                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9747                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9748                        "Attempt to simultaneously execute CB %#" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9749                        "set!",
9750                        (uint64_t)(pCB->commandBuffer));
9751                }
9752                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9753                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
9754                    skipCall |= log_msg(
9755                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9756                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9757                        "vkCmdExecuteCommands(): Secondary Command Buffer (%#" PRIxLEAST64
9758                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9759                        "(%#" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9760                                          "set, even though it does.",
9761                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9762                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9763                }
9764            }
9765            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9766                skipCall |=
9767                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9768                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9769                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9770                            "(%#" PRIxLEAST64 ") cannot be submitted with a query in "
9771                            "flight and inherited queries not "
9772                            "supported on this device.",
9773                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
9774            }
9775            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9776            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9777            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9778        }
9779        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
9780        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9781    }
9782    lock.unlock();
9783    if (!skipCall)
9784        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9785}
9786
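// Memory bound to an image may only be mapped while every tracked subresource of that
// image is in VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL.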
9787static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9788    bool skip_call = false;
9789    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9790    auto mem_data = dev_data->memObjMap.find(mem);
9791    if ((mem_data != dev_data->memObjMap.end()) && (mem_data->second.image != VK_NULL_HANDLE)) {
9792        std::vector<VkImageLayout> layouts;
9793        if (FindLayouts(dev_data, mem_data->second.image, layouts)) {
9794            for (auto layout : layouts) {
9795                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9796                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9797                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9798                                                                                         "GENERAL or PREINITIALIZED are supported.",
9799                                         string_VkImageLayout(layout));
9800                }
9801            }
9802        }
9803    }
9804    return skip_call;
9805}
9806
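// Intercepts vkMapMemory: marks the allocation's contents valid, requires the underlying
// memory type to be HOST_VISIBLE, records the mapped range for later flush/invalidate
// validation, and checks any bound image's layout before calling down the chain.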
9807VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
9808vkMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9809    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9810
9811    bool skip_call = false;
9812    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9813    std::unique_lock<std::mutex> lock(global_lock);
9814#if MTMERGESOURCE
9815    DEVICE_MEM_INFO *pMemObj = get_mem_obj_info(dev_data, mem);
9816    if (pMemObj) {
9817        pMemObj->valid = true;
9818        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9819             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9820            skip_call =
9821                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9822                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9823                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj %#" PRIxLEAST64, (uint64_t)mem);
9824        }
9825    }
9826    skip_call |= validateMemRange(dev_data, mem, offset, size);
9827    storeMemRanges(dev_data, mem, offset, size);
9828#endif
9829    skip_call |= ValidateMapImageLayouts(device, mem);
9830    lock.unlock();
9831
9832    if (!skip_call) {
9833        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9834#if MTMERGESOURCE
9835        lock.lock();
9836        initializeAndTrackMemory(dev_data, mem, size, ppData);
9837        lock.unlock();
9838#endif
9839    }
9840    return result;
9841}
9842
9843#if MTMERGESOURCE
9844VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkUnmapMemory(VkDevice device, VkDeviceMemory mem) {
9845    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9846    bool skipCall = false;
9847
9848    std::unique_lock<std::mutex> lock(global_lock);
9849    skipCall |= deleteMemRanges(my_data, mem);
9850    lock.unlock();
9851    if (!skipCall) {
9852        my_data->device_dispatch_table->UnmapMemory(device, mem);
9853    }
9854}
9855
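// Verify that each VkMappedMemoryRange handed to flush/invalidate lies inside the range
// recorded at vkMapMemory time: its offset must not precede the mapped offset and, unless
// the mapping used VK_WHOLE_SIZE, its upper bound must not exceed the mapped upper bound.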
9856static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
9857                                   const VkMappedMemoryRange *pMemRanges) {
9858    bool skipCall = false;
9859    for (uint32_t i = 0; i < memRangeCount; ++i) {
9860        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
9861        if (mem_element != my_data->memObjMap.end()) {
9862            if (mem_element->second.memRange.offset > pMemRanges[i].offset) {
9863                skipCall |= log_msg(
9864                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9865                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9866                    "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
9867                    "(" PRINTF_SIZE_T_SPECIFIER ").",
9868                    funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_element->second.memRange.offset));
9869            }
9870            if ((mem_element->second.memRange.size != VK_WHOLE_SIZE) &&
9871                ((mem_element->second.memRange.offset + mem_element->second.memRange.size) <
9872                 (pMemRanges[i].offset + pMemRanges[i].size))) {
9873                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9874                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9875                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
9876                                                                 ") exceeds the Memory Object's upper-bound "
9877                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
9878                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9879                                    static_cast<size_t>(mem_element->second.memRange.offset + mem_element->second.memRange.size));
9880            }
9881        }
9882    }
9883    return skipCall;
9884}
9885
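// For non-coherent mappings the layer returns a shadow buffer to the app (set up by
// initializeAndTrackMemory) rather than the driver's pointer. Judging by the loops below,
// the assumed layout is [size/2 guard][size bytes of app data][size/2 guard], with every
// guard byte pre-filled with NoncoherentMemoryFillValue: a disturbed guard byte means the
// app wrote outside its mapped range, and on flush the middle region is copied through to
// the driver's mapping (pDriverData).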
9886static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
9887                                                     const VkMappedMemoryRange *pMemRanges) {
9888    bool skipCall = false;
9889    for (uint32_t i = 0; i < memRangeCount; ++i) {
9890        auto mem_element = my_data->memObjMap.find(pMemRanges[i].memory);
9891        if (mem_element != my_data->memObjMap.end()) {
9892            if (mem_element->second.pData) {
9893                VkDeviceSize size = mem_element->second.memRange.size;
9894                VkDeviceSize half_size = (size / 2);
9895                char *data = static_cast<char *>(mem_element->second.pData);
9896                for (VkDeviceSize j = 0; j < half_size; ++j) {
9897                    if (data[j] != NoncoherentMemoryFillValue) {
9898                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9899                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9900                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
9901                                            (uint64_t)pMemRanges[i].memory);
9902                    }
9903                }
9904                for (auto j = size + half_size; j < 2 * size; ++j) {
9905                    if (data[j] != NoncoherentMemoryFillValue) {
9906                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9907                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9908                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj %" PRIxLEAST64,
9909                                            (uint64_t)pMemRanges[i].memory);
9910                    }
9911                }
9912                memcpy(mem_element->second.pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
9913            }
9914        }
9915    }
9916    return skipCall;
9917}
9918
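// Intercepts vkFlushMappedMemoryRanges: first checks the shadow-buffer guard bands and
// copies app data through to the driver, then validates each range against the recorded
// mapping before passing the call down.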
9919VK_LAYER_EXPORT VkResult VKAPI_CALL
9920vkFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9921    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9922    bool skipCall = false;
9923    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9924
9925    std::unique_lock<std::mutex> lock(global_lock);
9926    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
9927    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
9928    lock.unlock();
9929    if (!skipCall) {
9930        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9931    }
9932    return result;
9933}
9934
9935VK_LAYER_EXPORT VkResult VKAPI_CALL
9936vkInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9937    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9938    bool skipCall = false;
9939    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9940
9941    std::unique_lock<std::mutex> lock(global_lock);
9942    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
9943    lock.unlock();
9944    if (!skipCall) {
9945        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9946    }
9947    return result;
9948}
9949#endif
9950
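// Intercepts vkBindImageMemory. Note the asymmetry below: the new range is inserted into
// the allocation's imageRanges but validated against its bufferRanges; the intent appears
// to be flagging image bindings that overlap existing buffer bindings on the same
// VkDeviceMemory (vkBindBufferMemory presumably performs the mirror-image check).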
9951VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9952    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9953    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9954    bool skipCall = false;
9955    std::unique_lock<std::mutex> lock(global_lock);
9956    auto image_node = dev_data->imageMap.find(image);
9957    if (image_node != dev_data->imageMap.end()) {
9958        // Track objects tied to memory
9959        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
9960        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
9961        VkMemoryRequirements memRequirements;
9962        lock.unlock();
9963        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
9964        lock.lock();
9965
9966        // Track and validate bound memory range information
9967        const auto &memEntry = dev_data->memObjMap.find(mem);
9968        if (memEntry != dev_data->memObjMap.end()) {
9969            const MEMORY_RANGE range =
9970                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, memEntry->second.imageRanges);
9971            skipCall |=
9972                validate_memory_range(dev_data, memEntry->second.bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
9973        }
9974
9975        print_mem_list(dev_data);
9976        lock.unlock();
9977        if (!skipCall) {
9978            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
9979            lock.lock();
9980            dev_data->memObjMap[mem].image = image;
9981            image_node->second.mem = mem;
9982            image_node->second.memOffset = memoryOffset;
9983            image_node->second.memSize = memRequirements.size;
9984            lock.unlock();
9985        }
9986    } else {
9987        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9988                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
9989                "vkBindImageMemory: Cannot find image %" PRIx64 ". Has it already been destroyed?",
9990                reinterpret_cast<const uint64_t &>(image));
9991    }
9992    return result;
9993}
9994
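// Intercepts vkSetEvent. A host signal is immediately visible to all queues, so besides
// flagging events still in use by submitted command buffers, every queue's cached stage
// mask for this event gains VK_PIPELINE_STAGE_HOST_BIT.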
9995VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent(VkDevice device, VkEvent event) {
9996    bool skip_call = false;
9997    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9998    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9999    std::unique_lock<std::mutex> lock(global_lock);
10000    auto event_node = dev_data->eventMap.find(event);
10001    if (event_node != dev_data->eventMap.end()) {
10002        event_node->second.needsSignaled = false;
10003        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10004        if (event_node->second.in_use.load()) {
10005            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10006                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10007                                 "Cannot call vkSetEvent() on event %" PRIxLEAST64 " that is already in use by a command buffer.",
10008                                 reinterpret_cast<const uint64_t &>(event));
10009        }
10010    }
10011    lock.unlock();
10012    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10013    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
10014    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10015    for (auto queue_data : dev_data->queueMap) {
10016        auto event_entry = queue_data.second.eventToStageMap.find(event);
10017        if (event_entry != queue_data.second.eventToStageMap.end()) {
10018            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10019        }
10020    }
10021    if (!skip_call)
10022        result = dev_data->device_dispatch_table->SetEvent(device, event);
10023    return result;
10024}
10025
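// Intercepts vkQueueBindSparse: each buffer, opaque-image, and image bind is recorded as
// a sparse memory binding, and semaphore state is updated as for vkQueueSubmit: waits
// consume a pending signal (or are flagged as unsatisfiable) and signals on an
// already-signaled semaphore are flagged.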
10026VKAPI_ATTR VkResult VKAPI_CALL
10027vkQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
10028    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10029    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10030    bool skip_call = false;
10031    std::unique_lock<std::mutex> lock(global_lock);
10032    // First verify that fence is not in use
10033    if (fence != VK_NULL_HANDLE) {
10034        trackCommandBuffers(dev_data, queue, 0, nullptr, fence);
10035        auto fence_data = dev_data->fenceMap.find(fence);
10036        if ((fence_data != dev_data->fenceMap.end()) && (bindInfoCount != 0) && fence_data->second.in_use.load()) {
10037            skip_call |=
10038                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10039                        reinterpret_cast<uint64_t &>(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
10040                        "Fence %#" PRIx64 " is already in use by another submission.", reinterpret_cast<uint64_t &>(fence));
10041        }
10042        if ((fence_data != dev_data->fenceMap.end()) && !fence_data->second.needsSignaled) {
10043            skip_call |=
10044                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
10045                        reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
10046                        "Fence %#" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
10047                        reinterpret_cast<uint64_t &>(fence));
10048        }
10049    }
10050    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
10051        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
10052        // Track objects tied to memory
10053        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
10054            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
10055                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
10056                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
10057                                           "vkQueueBindSparse"))
10058                    skip_call = true;
10059            }
10060        }
10061        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
10062            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
10063                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
10064                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10065                                           "vkQueueBindSparse"))
10066                    skip_call = true;
10067            }
10068        }
10069        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10070            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10071                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
10072                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10073                                           "vkQueueBindSparse"))
10074                    skip_call = true;
10075            }
10076        }
10077        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10078            const VkSemaphore &semaphore = bindInfo.pWaitSemaphores[i];
10079            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10080                if (dev_data->semaphoreMap[semaphore].signaled) {
10081                    dev_data->semaphoreMap[semaphore].signaled = false;
10082                } else {
10083                    skip_call |=
10084                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10085                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10086                                "vkQueueBindSparse: Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64
10087                                " that has no way to be signaled.",
10088                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10089                }
10090            }
10091        }
10092        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10093            const VkSemaphore &semaphore = bindInfo.pSignalSemaphores[i];
10094            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10095                if (dev_data->semaphoreMap[semaphore].signaled) {
10096                    skip_call |=
10097                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10098                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10099                                "vkQueueBindSparse: Queue %#" PRIx64 " is signaling semaphore %#" PRIx64
10100                                ", but that semaphore is already signaled.",
10101                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10102                }
10103                dev_data->semaphoreMap[semaphore].signaled = true;
10104            }
10105        }
10106    }
10107    print_mem_list(dev_data);
10108    lock.unlock();
10109
10110    if (!skip_call)
10111        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10112
10113    return result;
10114}
10115
10116VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10117                                                 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10118    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10119    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10120    if (result == VK_SUCCESS) {
10121        std::lock_guard<std::mutex> lock(global_lock);
10122        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10123        sNode->signaled = false;
10124        sNode->queue = VK_NULL_HANDLE;
10125        sNode->in_use.store(0);
10126    }
10127    return result;
10128}
10129
10130VKAPI_ATTR VkResult VKAPI_CALL
10131vkCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10132    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10133    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10134    if (result == VK_SUCCESS) {
10135        std::lock_guard<std::mutex> lock(global_lock);
10136        dev_data->eventMap[*pEvent].needsSignaled = false;
10137        dev_data->eventMap[*pEvent].in_use.store(0);
10138        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10139    }
10140    return result;
10141}
10142
10143VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10144                                                                    const VkAllocationCallbacks *pAllocator,
10145                                                                    VkSwapchainKHR *pSwapchain) {
10146    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10147    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10148
10149    if (VK_SUCCESS == result) {
10150        SWAPCHAIN_NODE *psc_node = new SWAPCHAIN_NODE(pCreateInfo);
10151        std::lock_guard<std::mutex> lock(global_lock);
10152        dev_data->device_extensions.swapchainMap[*pSwapchain] = psc_node;
10153    }
10154
10155    return result;
10156}
10157
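// Intercepts vkDestroySwapchainKHR: tears down all per-image tracking (layouts,
// subresources, memory bindings, and IMAGE_NODE entries) for the swapchain's proxy
// images before releasing the SWAPCHAIN_NODE itself.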
10158VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10159vkDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10160    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10161    bool skipCall = false;
10162
10163    std::unique_lock<std::mutex> lock(global_lock);
10164    auto swapchain_data = dev_data->device_extensions.swapchainMap.find(swapchain);
10165    if (swapchain_data != dev_data->device_extensions.swapchainMap.end()) {
10166        if (swapchain_data->second->images.size() > 0) {
10167            for (auto swapchain_image : swapchain_data->second->images) {
10168                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10169                if (image_sub != dev_data->imageSubresourceMap.end()) {
10170                    for (auto imgsubpair : image_sub->second) {
10171                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10172                        if (image_item != dev_data->imageLayoutMap.end()) {
10173                            dev_data->imageLayoutMap.erase(image_item);
10174                        }
10175                    }
10176                    dev_data->imageSubresourceMap.erase(image_sub);
10177                }
10178                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
10179                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10180                dev_data->imageMap.erase(swapchain_image);
10181            }
10182        }
10183        delete swapchain_data->second;
10184        dev_data->device_extensions.swapchainMap.erase(swapchain);
10185    }
10186    lock.unlock();
10187    if (!skipCall)
10188        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10189}
10190
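// Intercepts vkGetSwapchainImagesKHR. Swapchain images never pass through vkCreateImage,
// so proxy IMAGE_NODE entries are created here on first retrieval and tied to the special
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY allocation; a later call that returns different data
// only triggers a warning.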
10191VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10192vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10193    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10194    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10195
10196    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10197        // This should never happen and is checked by param checker.
10198        if (!pCount)
10199            return result;
10200        std::lock_guard<std::mutex> lock(global_lock);
10201        const size_t count = *pCount;
10202        auto swapchain_node = dev_data->device_extensions.swapchainMap[swapchain];
10203        if (!swapchain_node->images.empty()) {
10204            // TODO : Not sure I like the memcmp here, but it works
10205            const bool mismatch = (swapchain_node->images.size() != count ||
10206                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10207            if (mismatch) {
10208                // TODO: Verify against Valid Usage section of extension
10209                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10210                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
10211                        "vkGetSwapchainImagesKHR(swapchain %" PRIu64
10212                        ") returned mismatching image data on a subsequent call",
10213                        (uint64_t)(swapchain));
10214            }
10215        } else {
10216        for (uint32_t i = 0; i < *pCount; ++i) {
10217            IMAGE_LAYOUT_NODE image_layout_node;
10218            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10219            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10220            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
10221            image_node.createInfo.mipLevels = 1;
10222            image_node.createInfo.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10223            image_node.createInfo.usage = swapchain_node->createInfo.imageUsage;
10224            image_node.valid = false;
10225            image_node.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10226            swapchain_node->images.push_back(pSwapchainImages[i]);
10227            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10228            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10229            dev_data->imageLayoutMap[subpair] = image_layout_node;
10230            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10231        }
        }
10232    }
10233    return result;
10234}
10235
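// Intercepts vkQueuePresentKHR: each wait semaphore must have a pending signal, each
// presented image must have valid bound memory, and every tracked subresource layout of a
// presented image must be VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.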
10236VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10237    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10238    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10239    bool skip_call = false;
10240
10241    if (pPresentInfo) {
10242        std::lock_guard<std::mutex> lock(global_lock);
10243        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10244            const VkSemaphore &semaphore = pPresentInfo->pWaitSemaphores[i];
10245            if (dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10246                if (dev_data->semaphoreMap[semaphore].signaled) {
10247                    dev_data->semaphoreMap[semaphore].signaled = false;
10248                } else {
10249                    skip_call |=
10250                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10251                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10252                                "Queue %#" PRIx64 " is waiting on semaphore %#" PRIx64 " that has no way to be signaled.",
10253                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10254                }
10255            }
10256        }
10257        VkDeviceMemory mem;
10258        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10259            auto swapchain_data = dev_data->device_extensions.swapchainMap.find(pPresentInfo->pSwapchains[i]);
10260            if (swapchain_data != dev_data->device_extensions.swapchainMap.end() &&
10261                pPresentInfo->pImageIndices[i] < swapchain_data->second->images.size()) {
10262                VkImage image = swapchain_data->second->images[pPresentInfo->pImageIndices[i]];
10263#if MTMERGESOURCE
10264                skip_call |=
10265                    get_mem_binding_from_object(dev_data, (uint64_t)(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, &mem);
10266                skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10267#endif
10268                vector<VkImageLayout> layouts;
10269                if (FindLayouts(dev_data, image, layouts)) {
10270                    for (auto layout : layouts) {
10271                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10272                            skip_call |=
10273                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10274                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
10275                                        "Images passed to present must be in layout "
10276                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but this image is in %s.",
10277                                        string_VkImageLayout(layout));
10278                        }
10279                    }
10280                }
10281            }
10282        }
10283    }
10284
10285    if (!skip_call)
10286        result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10287
10288    return result;
10289}
10290
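// Intercepts vkAcquireNextImageKHR: the semaphore, if provided, must not already be
// signaled (it is marked signaled here for later waits), and a provided fence is
// associated with the swapchain it will be signaled for.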
10291VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10292                                                     VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10293    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10294    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10295    bool skipCall = false;
10296
10297    std::unique_lock<std::mutex> lock(global_lock);
10298    if (semaphore != VK_NULL_HANDLE &&
10299        dev_data->semaphoreMap.find(semaphore) != dev_data->semaphoreMap.end()) {
10300        if (dev_data->semaphoreMap[semaphore].signaled) {
10301            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10302                               reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10303                               "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10304        }
10305        dev_data->semaphoreMap[semaphore].signaled = true;
10306    }
10307    auto fence_data = dev_data->fenceMap.find(fence);
10308    if (fence_data != dev_data->fenceMap.end()) {
10309        fence_data->second.swapchain = swapchain;
10310    }
10311    lock.unlock();
10312
10313    if (!skipCall) {
10314        result =
10315            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10316    }
10317
10318    return result;
10319}
10320
10321VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10322vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10323                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10324    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10325    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10326    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10327    if (VK_SUCCESS == res) {
10328        std::lock_guard<std::mutex> lock(global_lock);
10329        res = layer_create_msg_callback(my_data->report_data, pCreateInfo, pAllocator, pMsgCallback);
10330    }
10331    return res;
10332}
10333
10334VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance,
10335                                                                           VkDebugReportCallbackEXT msgCallback,
10336                                                                           const VkAllocationCallbacks *pAllocator) {
10337    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10338    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10339    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10340    std::lock_guard<std::mutex> lock(global_lock);
10341    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10342}
10343
10344VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL
10345vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10346                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10347    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10348    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10349                                                            pMsg);
10350}
10351
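// Layer entry point for vkGetDeviceProcAddr: return this layer's interception for every
// device-level function it wraps (WSI entry points only when the swapchain extension was
// enabled at device creation), otherwise chain down to the next layer's dispatch table.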
10352VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10353    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10354        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10355    if (!strcmp(funcName, "vkDestroyDevice"))
10356        return (PFN_vkVoidFunction)vkDestroyDevice;
10357    if (!strcmp(funcName, "vkQueueSubmit"))
10358        return (PFN_vkVoidFunction)vkQueueSubmit;
10359    if (!strcmp(funcName, "vkWaitForFences"))
10360        return (PFN_vkVoidFunction)vkWaitForFences;
10361    if (!strcmp(funcName, "vkGetFenceStatus"))
10362        return (PFN_vkVoidFunction)vkGetFenceStatus;
10363    if (!strcmp(funcName, "vkQueueWaitIdle"))
10364        return (PFN_vkVoidFunction)vkQueueWaitIdle;
10365    if (!strcmp(funcName, "vkDeviceWaitIdle"))
10366        return (PFN_vkVoidFunction)vkDeviceWaitIdle;
10367    if (!strcmp(funcName, "vkGetDeviceQueue"))
10368        return (PFN_vkVoidFunction)vkGetDeviceQueue;
10369    if (!strcmp(funcName, "vkDestroyInstance"))
10370        return (PFN_vkVoidFunction)vkDestroyInstance;
10373    if (!strcmp(funcName, "vkDestroyFence"))
10374        return (PFN_vkVoidFunction)vkDestroyFence;
10375    if (!strcmp(funcName, "vkResetFences"))
10376        return (PFN_vkVoidFunction)vkResetFences;
10377    if (!strcmp(funcName, "vkDestroySemaphore"))
10378        return (PFN_vkVoidFunction)vkDestroySemaphore;
10379    if (!strcmp(funcName, "vkDestroyEvent"))
10380        return (PFN_vkVoidFunction)vkDestroyEvent;
10381    if (!strcmp(funcName, "vkDestroyQueryPool"))
10382        return (PFN_vkVoidFunction)vkDestroyQueryPool;
10383    if (!strcmp(funcName, "vkDestroyBuffer"))
10384        return (PFN_vkVoidFunction)vkDestroyBuffer;
10385    if (!strcmp(funcName, "vkDestroyBufferView"))
10386        return (PFN_vkVoidFunction)vkDestroyBufferView;
10387    if (!strcmp(funcName, "vkDestroyImage"))
10388        return (PFN_vkVoidFunction)vkDestroyImage;
10389    if (!strcmp(funcName, "vkDestroyImageView"))
10390        return (PFN_vkVoidFunction)vkDestroyImageView;
10391    if (!strcmp(funcName, "vkDestroyShaderModule"))
10392        return (PFN_vkVoidFunction)vkDestroyShaderModule;
10393    if (!strcmp(funcName, "vkDestroyPipeline"))
10394        return (PFN_vkVoidFunction)vkDestroyPipeline;
10395    if (!strcmp(funcName, "vkDestroyPipelineLayout"))
10396        return (PFN_vkVoidFunction)vkDestroyPipelineLayout;
10397    if (!strcmp(funcName, "vkDestroySampler"))
10398        return (PFN_vkVoidFunction)vkDestroySampler;
10399    if (!strcmp(funcName, "vkDestroyDescriptorSetLayout"))
10400        return (PFN_vkVoidFunction)vkDestroyDescriptorSetLayout;
10401    if (!strcmp(funcName, "vkDestroyDescriptorPool"))
10402        return (PFN_vkVoidFunction)vkDestroyDescriptorPool;
10403    if (!strcmp(funcName, "vkDestroyFramebuffer"))
10404        return (PFN_vkVoidFunction)vkDestroyFramebuffer;
10405    if (!strcmp(funcName, "vkDestroyRenderPass"))
10406        return (PFN_vkVoidFunction)vkDestroyRenderPass;
10407    if (!strcmp(funcName, "vkCreateBuffer"))
10408        return (PFN_vkVoidFunction)vkCreateBuffer;
10409    if (!strcmp(funcName, "vkCreateBufferView"))
10410        return (PFN_vkVoidFunction)vkCreateBufferView;
10411    if (!strcmp(funcName, "vkCreateImage"))
10412        return (PFN_vkVoidFunction)vkCreateImage;
10413    if (!strcmp(funcName, "vkCreateImageView"))
10414        return (PFN_vkVoidFunction)vkCreateImageView;
10415    if (!strcmp(funcName, "vkCreateFence"))
10416        return (PFN_vkVoidFunction)vkCreateFence;
10417    if (!strcmp(funcName, "vkCreatePipelineCache"))
10418        return (PFN_vkVoidFunction)vkCreatePipelineCache;
10419    if (!strcmp(funcName, "vkDestroyPipelineCache"))
10420        return (PFN_vkVoidFunction)vkDestroyPipelineCache;
10421    if (!strcmp(funcName, "vkGetPipelineCacheData"))
10422        return (PFN_vkVoidFunction)vkGetPipelineCacheData;
10423    if (!strcmp(funcName, "vkMergePipelineCaches"))
10424        return (PFN_vkVoidFunction)vkMergePipelineCaches;
10425    if (!strcmp(funcName, "vkCreateGraphicsPipelines"))
10426        return (PFN_vkVoidFunction)vkCreateGraphicsPipelines;
10427    if (!strcmp(funcName, "vkCreateComputePipelines"))
10428        return (PFN_vkVoidFunction)vkCreateComputePipelines;
10429    if (!strcmp(funcName, "vkCreateSampler"))
10430        return (PFN_vkVoidFunction)vkCreateSampler;
10431    if (!strcmp(funcName, "vkCreateDescriptorSetLayout"))
10432        return (PFN_vkVoidFunction)vkCreateDescriptorSetLayout;
10433    if (!strcmp(funcName, "vkCreatePipelineLayout"))
10434        return (PFN_vkVoidFunction)vkCreatePipelineLayout;
10435    if (!strcmp(funcName, "vkCreateDescriptorPool"))
10436        return (PFN_vkVoidFunction)vkCreateDescriptorPool;
10437    if (!strcmp(funcName, "vkResetDescriptorPool"))
10438        return (PFN_vkVoidFunction)vkResetDescriptorPool;
10439    if (!strcmp(funcName, "vkAllocateDescriptorSets"))
10440        return (PFN_vkVoidFunction)vkAllocateDescriptorSets;
10441    if (!strcmp(funcName, "vkFreeDescriptorSets"))
10442        return (PFN_vkVoidFunction)vkFreeDescriptorSets;
10443    if (!strcmp(funcName, "vkUpdateDescriptorSets"))
10444        return (PFN_vkVoidFunction)vkUpdateDescriptorSets;
10445    if (!strcmp(funcName, "vkCreateCommandPool"))
10446        return (PFN_vkVoidFunction)vkCreateCommandPool;
10447    if (!strcmp(funcName, "vkDestroyCommandPool"))
10448        return (PFN_vkVoidFunction)vkDestroyCommandPool;
10449    if (!strcmp(funcName, "vkResetCommandPool"))
10450        return (PFN_vkVoidFunction)vkResetCommandPool;
10451    if (!strcmp(funcName, "vkCreateQueryPool"))
10452        return (PFN_vkVoidFunction)vkCreateQueryPool;
10453    if (!strcmp(funcName, "vkAllocateCommandBuffers"))
10454        return (PFN_vkVoidFunction)vkAllocateCommandBuffers;
10455    if (!strcmp(funcName, "vkFreeCommandBuffers"))
10456        return (PFN_vkVoidFunction)vkFreeCommandBuffers;
10457    if (!strcmp(funcName, "vkBeginCommandBuffer"))
10458        return (PFN_vkVoidFunction)vkBeginCommandBuffer;
10459    if (!strcmp(funcName, "vkEndCommandBuffer"))
10460        return (PFN_vkVoidFunction)vkEndCommandBuffer;
10461    if (!strcmp(funcName, "vkResetCommandBuffer"))
10462        return (PFN_vkVoidFunction)vkResetCommandBuffer;
10463    if (!strcmp(funcName, "vkCmdBindPipeline"))
10464        return (PFN_vkVoidFunction)vkCmdBindPipeline;
10465    if (!strcmp(funcName, "vkCmdSetViewport"))
10466        return (PFN_vkVoidFunction)vkCmdSetViewport;
10467    if (!strcmp(funcName, "vkCmdSetScissor"))
10468        return (PFN_vkVoidFunction)vkCmdSetScissor;
10469    if (!strcmp(funcName, "vkCmdSetLineWidth"))
10470        return (PFN_vkVoidFunction)vkCmdSetLineWidth;
10471    if (!strcmp(funcName, "vkCmdSetDepthBias"))
10472        return (PFN_vkVoidFunction)vkCmdSetDepthBias;
10473    if (!strcmp(funcName, "vkCmdSetBlendConstants"))
10474        return (PFN_vkVoidFunction)vkCmdSetBlendConstants;
10475    if (!strcmp(funcName, "vkCmdSetDepthBounds"))
10476        return (PFN_vkVoidFunction)vkCmdSetDepthBounds;
10477    if (!strcmp(funcName, "vkCmdSetStencilCompareMask"))
10478        return (PFN_vkVoidFunction)vkCmdSetStencilCompareMask;
10479    if (!strcmp(funcName, "vkCmdSetStencilWriteMask"))
10480        return (PFN_vkVoidFunction)vkCmdSetStencilWriteMask;
10481    if (!strcmp(funcName, "vkCmdSetStencilReference"))
10482        return (PFN_vkVoidFunction)vkCmdSetStencilReference;
10483    if (!strcmp(funcName, "vkCmdBindDescriptorSets"))
10484        return (PFN_vkVoidFunction)vkCmdBindDescriptorSets;
10485    if (!strcmp(funcName, "vkCmdBindVertexBuffers"))
10486        return (PFN_vkVoidFunction)vkCmdBindVertexBuffers;
10487    if (!strcmp(funcName, "vkCmdBindIndexBuffer"))
10488        return (PFN_vkVoidFunction)vkCmdBindIndexBuffer;
10489    if (!strcmp(funcName, "vkCmdDraw"))
10490        return (PFN_vkVoidFunction)vkCmdDraw;
10491    if (!strcmp(funcName, "vkCmdDrawIndexed"))
10492        return (PFN_vkVoidFunction)vkCmdDrawIndexed;
10493    if (!strcmp(funcName, "vkCmdDrawIndirect"))
10494        return (PFN_vkVoidFunction)vkCmdDrawIndirect;
10495    if (!strcmp(funcName, "vkCmdDrawIndexedIndirect"))
10496        return (PFN_vkVoidFunction)vkCmdDrawIndexedIndirect;
10497    if (!strcmp(funcName, "vkCmdDispatch"))
10498        return (PFN_vkVoidFunction)vkCmdDispatch;
10499    if (!strcmp(funcName, "vkCmdDispatchIndirect"))
10500        return (PFN_vkVoidFunction)vkCmdDispatchIndirect;
10501    if (!strcmp(funcName, "vkCmdCopyBuffer"))
10502        return (PFN_vkVoidFunction)vkCmdCopyBuffer;
10503    if (!strcmp(funcName, "vkCmdCopyImage"))
10504        return (PFN_vkVoidFunction)vkCmdCopyImage;
10505    if (!strcmp(funcName, "vkCmdBlitImage"))
10506        return (PFN_vkVoidFunction)vkCmdBlitImage;
10507    if (!strcmp(funcName, "vkCmdCopyBufferToImage"))
10508        return (PFN_vkVoidFunction)vkCmdCopyBufferToImage;
10509    if (!strcmp(funcName, "vkCmdCopyImageToBuffer"))
10510        return (PFN_vkVoidFunction)vkCmdCopyImageToBuffer;
10511    if (!strcmp(funcName, "vkCmdUpdateBuffer"))
10512        return (PFN_vkVoidFunction)vkCmdUpdateBuffer;
10513    if (!strcmp(funcName, "vkCmdFillBuffer"))
10514        return (PFN_vkVoidFunction)vkCmdFillBuffer;
10515    if (!strcmp(funcName, "vkCmdClearColorImage"))
10516        return (PFN_vkVoidFunction)vkCmdClearColorImage;
10517    if (!strcmp(funcName, "vkCmdClearDepthStencilImage"))
10518        return (PFN_vkVoidFunction)vkCmdClearDepthStencilImage;
10519    if (!strcmp(funcName, "vkCmdClearAttachments"))
10520        return (PFN_vkVoidFunction)vkCmdClearAttachments;
10521    if (!strcmp(funcName, "vkCmdResolveImage"))
10522        return (PFN_vkVoidFunction)vkCmdResolveImage;
10523    if (!strcmp(funcName, "vkCmdSetEvent"))
10524        return (PFN_vkVoidFunction)vkCmdSetEvent;
10525    if (!strcmp(funcName, "vkCmdResetEvent"))
10526        return (PFN_vkVoidFunction)vkCmdResetEvent;
10527    if (!strcmp(funcName, "vkCmdWaitEvents"))
10528        return (PFN_vkVoidFunction)vkCmdWaitEvents;
10529    if (!strcmp(funcName, "vkCmdPipelineBarrier"))
10530        return (PFN_vkVoidFunction)vkCmdPipelineBarrier;
10531    if (!strcmp(funcName, "vkCmdBeginQuery"))
10532        return (PFN_vkVoidFunction)vkCmdBeginQuery;
10533    if (!strcmp(funcName, "vkCmdEndQuery"))
10534        return (PFN_vkVoidFunction)vkCmdEndQuery;
10535    if (!strcmp(funcName, "vkCmdResetQueryPool"))
10536        return (PFN_vkVoidFunction)vkCmdResetQueryPool;
10537    if (!strcmp(funcName, "vkCmdCopyQueryPoolResults"))
10538        return (PFN_vkVoidFunction)vkCmdCopyQueryPoolResults;
10539    if (!strcmp(funcName, "vkCmdPushConstants"))
10540        return (PFN_vkVoidFunction)vkCmdPushConstants;
10541    if (!strcmp(funcName, "vkCmdWriteTimestamp"))
10542        return (PFN_vkVoidFunction)vkCmdWriteTimestamp;
10543    if (!strcmp(funcName, "vkCreateFramebuffer"))
10544        return (PFN_vkVoidFunction)vkCreateFramebuffer;
10545    if (!strcmp(funcName, "vkCreateShaderModule"))
10546        return (PFN_vkVoidFunction)vkCreateShaderModule;
10547    if (!strcmp(funcName, "vkCreateRenderPass"))
10548        return (PFN_vkVoidFunction)vkCreateRenderPass;
10549    if (!strcmp(funcName, "vkCmdBeginRenderPass"))
10550        return (PFN_vkVoidFunction)vkCmdBeginRenderPass;
10551    if (!strcmp(funcName, "vkCmdNextSubpass"))
10552        return (PFN_vkVoidFunction)vkCmdNextSubpass;
10553    if (!strcmp(funcName, "vkCmdEndRenderPass"))
10554        return (PFN_vkVoidFunction)vkCmdEndRenderPass;
10555    if (!strcmp(funcName, "vkCmdExecuteCommands"))
10556        return (PFN_vkVoidFunction)vkCmdExecuteCommands;
10557    if (!strcmp(funcName, "vkSetEvent"))
10558        return (PFN_vkVoidFunction)vkSetEvent;
10559    if (!strcmp(funcName, "vkMapMemory"))
10560        return (PFN_vkVoidFunction)vkMapMemory;
10561#if MTMERGESOURCE
10562    if (!strcmp(funcName, "vkUnmapMemory"))
10563        return (PFN_vkVoidFunction)vkUnmapMemory;
10564    if (!strcmp(funcName, "vkAllocateMemory"))
10565        return (PFN_vkVoidFunction)vkAllocateMemory;
10566    if (!strcmp(funcName, "vkFreeMemory"))
10567        return (PFN_vkVoidFunction)vkFreeMemory;
10568    if (!strcmp(funcName, "vkFlushMappedMemoryRanges"))
10569        return (PFN_vkVoidFunction)vkFlushMappedMemoryRanges;
10570    if (!strcmp(funcName, "vkInvalidateMappedMemoryRanges"))
10571        return (PFN_vkVoidFunction)vkInvalidateMappedMemoryRanges;
10572    if (!strcmp(funcName, "vkBindBufferMemory"))
10573        return (PFN_vkVoidFunction)vkBindBufferMemory;
10574    if (!strcmp(funcName, "vkGetBufferMemoryRequirements"))
10575        return (PFN_vkVoidFunction)vkGetBufferMemoryRequirements;
10576    if (!strcmp(funcName, "vkGetImageMemoryRequirements"))
10577        return (PFN_vkVoidFunction)vkGetImageMemoryRequirements;
10578#endif
10579    if (!strcmp(funcName, "vkGetQueryPoolResults"))
10580        return (PFN_vkVoidFunction)vkGetQueryPoolResults;
10581    if (!strcmp(funcName, "vkBindImageMemory"))
10582        return (PFN_vkVoidFunction)vkBindImageMemory;
10583    if (!strcmp(funcName, "vkQueueBindSparse"))
10584        return (PFN_vkVoidFunction)vkQueueBindSparse;
10585    if (!strcmp(funcName, "vkCreateSemaphore"))
10586        return (PFN_vkVoidFunction)vkCreateSemaphore;
10587    if (!strcmp(funcName, "vkCreateEvent"))
10588        return (PFN_vkVoidFunction)vkCreateEvent;
10589
10590    if (dev == NULL)
10591        return NULL;
10592
10593    layer_data *dev_data;
10594    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10595
10596    if (dev_data->device_extensions.wsi_enabled) {
10597        if (!strcmp(funcName, "vkCreateSwapchainKHR"))
10598            return (PFN_vkVoidFunction)vkCreateSwapchainKHR;
10599        if (!strcmp(funcName, "vkDestroySwapchainKHR"))
10600            return (PFN_vkVoidFunction)vkDestroySwapchainKHR;
10601        if (!strcmp(funcName, "vkGetSwapchainImagesKHR"))
10602            return (PFN_vkVoidFunction)vkGetSwapchainImagesKHR;
10603        if (!strcmp(funcName, "vkAcquireNextImageKHR"))
10604            return (PFN_vkVoidFunction)vkAcquireNextImageKHR;
10605        if (!strcmp(funcName, "vkQueuePresentKHR"))
10606            return (PFN_vkVoidFunction)vkQueuePresentKHR;
10607    }
10608
10609    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
10610    {
10611        if (pTable->GetDeviceProcAddr == NULL)
10612            return NULL;
10613        return pTable->GetDeviceProcAddr(dev, funcName);
10614    }
10615}
10616
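// Instance-level counterpart: hand out instance entry points first, then any
// debug-report entry points via the report_data helper, then chain to the next layer.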
10617VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10618    if (!strcmp(funcName, "vkGetInstanceProcAddr"))
10619        return (PFN_vkVoidFunction)vkGetInstanceProcAddr;
10620    if (!strcmp(funcName, "vkGetDeviceProcAddr"))
10621        return (PFN_vkVoidFunction)vkGetDeviceProcAddr;
10622    if (!strcmp(funcName, "vkCreateInstance"))
10623        return (PFN_vkVoidFunction)vkCreateInstance;
10624    if (!strcmp(funcName, "vkCreateDevice"))
10625        return (PFN_vkVoidFunction)vkCreateDevice;
10626    if (!strcmp(funcName, "vkDestroyInstance"))
10627        return (PFN_vkVoidFunction)vkDestroyInstance;
10628    if (!strcmp(funcName, "vkEnumerateInstanceLayerProperties"))
10629        return (PFN_vkVoidFunction)vkEnumerateInstanceLayerProperties;
10630    if (!strcmp(funcName, "vkEnumerateInstanceExtensionProperties"))
10631        return (PFN_vkVoidFunction)vkEnumerateInstanceExtensionProperties;
10632    if (!strcmp(funcName, "vkEnumerateDeviceLayerProperties"))
10633        return (PFN_vkVoidFunction)vkEnumerateDeviceLayerProperties;
10634    if (!strcmp(funcName, "vkEnumerateDeviceExtensionProperties"))
10635        return (PFN_vkVoidFunction)vkEnumerateDeviceExtensionProperties;
10636
10637    if (instance == NULL)
10638        return NULL;
10639
10640    PFN_vkVoidFunction fptr;
10641
10642    layer_data *my_data;
10643    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10644    fptr = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
10645    if (fptr)
10646        return fptr;
10647
10648    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10649    if (pTable->GetInstanceProcAddr == NULL)
10650        return NULL;
10651    return pTable->GetInstanceProcAddr(instance, funcName);
10652}
10653