core_validation.cpp revision dc7c45f01ae5690f7c969b4760463c1a6bac52d5
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif

using namespace std;

// TODO : CB really needs its own class and files so this is just temp code until that happens
GLOBAL_CB_NODE::~GLOBAL_CB_NODE() {
    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
        // Make sure that no sets hold onto deleted CB binding
        for (auto set : lastBound[i].uniqueBoundSets) {
            set->RemoveBoundCommandBuffer(this);
        }
    }
}

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
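// Code below that keys off of this sentinel (validate_memory_is_valid, set_memory_valid,
// update_cmd_buf_and_mem_references) treats swapchain images specially rather than looking
// up a real DEVICE_MEM_INFO entry for them.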

struct devExts {
    bool wsi_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;
    unique_ptr<INSTANCE_STATE> instance_state;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // All queues under given device
    // Vector indices correspond to queueFamilyIndex
    vector<unique_ptr<VkQueueFamilyProperties>> queue_family_properties;
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;
    VkPhysicalDeviceFeatures physical_device_features;
    unique_ptr<PHYSICAL_DEVICE_STATE> physical_device_state;

    layer_data()
        : instance_state(nullptr), report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
          device_extensions(), device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{}, physical_device_features{},
          physical_device_state(nullptr) {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

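// Check enabled-layer ordering: as the console message below states, VK_LAYER_GOOGLE_unique_objects
// may only be activated after (i.e. listed later in ppEnabledLayerNames than) this layer.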
template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
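
/* Because operator*() yields the iterator itself, a range-based for over a shader_module yields
 * spirv_inst_iter values directly, e.g.:
 *     for (auto insn : *module)
 *         if (insn.opcode() == spv::OpEntryPoint) { ... }
 */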

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
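    /* (words 0..4 of a SPIR-V module are the fixed five-word header -- magic number, version,
     * generator, id bound, schema -- so the first instruction starts at word 5) */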
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view create info ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node)
            return &img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node)
            return &buff_node->mem;
        break;
    }
    default:
        break;
    }
    return nullptr;
}

// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skipCall = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                           MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                           " used by %s. In this case, %s should have %s set during creation.",
                           ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skipCall;
}
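
// Example: with desired == (VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
// strict == true requires actual to contain both bits, while strict == false accepts either one.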

// Helper function to validate usage flags for images
// Pulls image info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_image_usage_flags(layer_data *dev_data, VkImage image, VkFlags desired, VkBool32 strict,
                                       char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto const image_node = getImageNode(dev_data, image);
    if (image_node) {
        skipCall = validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict, (uint64_t)image,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "image", func_name, usage_string);
    }
    return skipCall;
}

// Helper function to validate usage flags for buffers
// Pulls buffer info and then sends actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
                                        char const *func_name, char const *usage_string) {
    bool skipCall = false;
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        skipCall = validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict, (uint64_t)buffer,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
    }
    return skipCall;
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

static bool validate_memory_is_valid(layer_data *dev_data, VkDeviceMemory mem, const char *functionName,
                                     VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto const image_node = getImageNode(dev_data, image);
        if (image_node && !image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, (uint64_t)(image));
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj && !pMemObj->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
                           "%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
                           (uint64_t)(mem));
        }
    }
    return false;
}

static void set_memory_valid(layer_data *dev_data, VkDeviceMemory mem, bool valid, VkImage image = VK_NULL_HANDLE) {
    if (mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        auto image_node = getImageNode(dev_data, image);
        if (image_node) {
            image_node->valid = valid;
        }
    } else {
        DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
        if (pMemObj) {
            pMemObj->valid = valid;
        }
    }
}

// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skipCall = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->commandBufferBindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skipCall;
}
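
// Note the two-way bookkeeping above: the DEVICE_MEM_INFO records every command buffer that
// references it, and the GLOBAL_CB_NODE records every memory object it references, so either
// side can be torn down cleanly (see clear_cmd_buf_and_mem_references below).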
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->commandBufferBindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skipCall = false;
    size_t cmdBufRefCount = pMemObjInfo->commandBufferBindings.size();
    size_t objRefCount = pMemObjInfo->objBindings.size();

    if (cmdBufRefCount != 0) {
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                           "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                           " references",
                           (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->commandBufferBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->commandBufferBindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->objBindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->objBindings.clear();
    }
    return skipCall;
}

static bool deleteMemObjInfo(layer_data *my_data, void *object, VkDeviceMemory mem) {
    bool skipCall = false;
    auto item = my_data->memObjMap.find(mem);
    if (item != my_data->memObjMap.end()) {
        my_data->memObjMap.erase(item);
    } else {
        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                           "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map", (uint64_t)mem);
    }
    return skipCall;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skipCall = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        if (pInfo->allocInfo.allocationSize == 0 && !internal) {
            // TODO: Verify against Valid Use section
            skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                               "Attempting to free memory associated with a Persistent Image, 0x%" PRIxLEAST64 ", "
                               "this should not be explicitly freed\n",
                               (uint64_t)mem);
        } else {
            // Clear any CB bindings for completed CBs
            //   TODO : Is there a better place to do this?

            assert(pInfo->object != VK_NULL_HANDLE);
            // clear_cmd_buf_and_mem_references removes elements from
            // pInfo->commandBufferBindings -- this copy not needed in c++14,
            // and probably not needed in practice in c++11
            auto bindings = pInfo->commandBufferBindings;
            for (auto cb : bindings) {
                if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                    clear_cmd_buf_and_mem_references(dev_data, cb);
                }
            }

            // Now verify that no references to this mem obj remain and remove bindings
            if (pInfo->commandBufferBindings.size() || pInfo->objBindings.size()) {
                skipCall |= reportMemReferencesAndCleanUp(dev_data, pInfo);
            }
            // Delete mem obj info
            skipCall |= deleteMemObjInfo(dev_data, object, mem);
        }
    }
    return skipCall;
}

static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    default:
        return "unknown";
    }
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skipCall = false;
    VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->objBindings.erase({handle, type})) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                            ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skipCall;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                            VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        // TODO: Verify against Valid Use section of spec.
        skipCall = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                           "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
            if (pPrevBinding != NULL) {
                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
                                    "MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                                    ") which has already been bound to mem object 0x%" PRIxLEAST64,
                                    apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
            } else {
                pMemInfo->objBindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *pMemBinding = mem;
            }
        }
    }
    return skipCall;
}

// For NULL mem case, clear any previous binding. Otherwise:
//  Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns true if a validation error was logged
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skipCall = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skipCall = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->objBindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skipCall;
}

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        *mem = getImageNode(dev_data, VkImage(handle))->mem;
        break;
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        *mem = getBufferNode(dev_data, VkBuffer(handle))->mem;
        break;
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                            " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Get memory binding for given image
static bool getImageMemory(layer_data *dev_data, VkImage handle, VkDeviceMemory *mem) {
    return get_mem_for_type(dev_data, reinterpret_cast<uint64_t &>(handle), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, mem);
}

// Get memory binding for given buffer
static bool getBufferMemory(layer_data *dev_data, VkBuffer handle, VkDeviceMemory *mem) {
    return get_mem_for_type(dev_data, reinterpret_cast<uint64_t &>(handle), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, mem);
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->commandBufferBindings.size() + mem_info->objBindings.size());
        if (0 != mem_info->allocInfo.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->allocInfo, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->objBindings.size());
        if (mem_info->objBindings.size() > 0) {
            for (auto obj : mem_info->objBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->commandBufferBindings.size());
        if (mem_info->commandBufferBindings.size() > 0) {
            for (auto cb : mem_info->commandBufferBindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}
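
/* After this pass, def_index maps each result <id> to the word offset of its defining instruction,
 * so e.g. for "%7 = OpTypeInt 32 1" starting at word offset 20, def_index[7] == 20 and get_def(7)
 * lands on that OpTypeInt. */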

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
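
/* In an OpEntryPoint instruction, word(1) is the execution model and the literal name starts at
 * word(3). The "1u << insn.word(1)" above works because the SPIR-V execution model values for the
 * graphics/compute stages line up with the bit positions of VkShaderStageFlagBits
 * (Vertex=0 -> VK_SHADER_STAGE_VERTEX_BIT, ..., GLCompute=5 -> VK_SHADER_STAGE_COMPUTE_BIT). */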

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

/* get the value of an integral constant */
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        /* TODO: Either ensure that the specialization transform is already performed on a module we're
         * considering here, OR -- specialize on the fly now.
         */
        return 1;
    }

    return value.word(3);
}
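
/* Note: for OpConstant the words are (result type, result id, literal value...); word(3) is the
 * full value only for types up to 32 bits wide -- wider constants span additional words, which
 * this helper does not handle. */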

static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}

static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}

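/* "Narrow" here means a scalar integer or float type less than 64 bits wide; these are the only
 * types eligible for the relaxed vector-vs-scalar interface matching in types_match() below. */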
static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}

static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed,
                        bool b_arrayed, bool relaxed) {
    /* walk two type trees together, and complain about differences */
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        /* match on pointee type. storage class is expected to differ */
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        /* if we haven't resolved array-of-verts by here, we're not going to. */
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        /* match on width, signedness */
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        /* match on width */
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        /* match on element type, count. */
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        /* match on element type, count. */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        /* match on element type, count. these all have the same layout. we don't get here if
         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
         * not a literal within OpTypeArray */
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        /* match on all element types */
        {
            if (a_insn.len() != b_insn.len()) {
                return false; /* structs cannot match if member counts differ */
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        /* remaining types are CLisms, or may not appear in the interfaces we
         * are interested in. Just claim no match.
         */
        return false;
    }
}

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
         * we're never actually passing pointers around. */
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        /* num locations is the dimension * element size */
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
         * types require two. */
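        /* e.g. a 3-component vector of 64-bit floats: (64 * 3 + 127) / 128 == 2 locations */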
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        /* everything else is just 1. */
        return 1;

        /* TODO: extend to handle 64bit scalar types, whose vectors may need
         * multiple locations. */
    }
}
1274
1275static unsigned get_locations_consumed_by_format(VkFormat format) {
1276    switch (format) {
1277    case VK_FORMAT_R64G64B64A64_SFLOAT:
1278    case VK_FORMAT_R64G64B64A64_SINT:
1279    case VK_FORMAT_R64G64B64A64_UINT:
1280    case VK_FORMAT_R64G64B64_SFLOAT:
1281    case VK_FORMAT_R64G64B64_SINT:
1282    case VK_FORMAT_R64G64B64_UINT:
1283        return 2;
1284    default:
1285        return 1;
1286    }
1287}
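
/* Illustrative cross-check (not executed): a dvec3 vertex input uses
 * VK_FORMAT_R64G64B64_SFLOAT on the pipeline side and consumes
 * (64 * 3 + 127) / 128 = 2 locations on the shader side, matching the 2
 * returned by get_locations_consumed_by_format() above. */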
1288
1289typedef std::pair<unsigned, unsigned> location_t;
1290typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1291
1292struct interface_var {
1293    uint32_t id;
1294    uint32_t type_id;
1295    uint32_t offset;
1296    bool is_patch;
1297    bool is_block_member;
1298    /* TODO: collect the name, too? Isn't required to be present. */
1299};
1300
1301struct shader_stage_attributes {
1302    char const *const name;
1303    bool arrayed_input;
1304    bool arrayed_output;
1305};
1306
1307static shader_stage_attributes shader_stage_attribs[] = {
1308    {"vertex shader", false, false},
1309    {"tessellation control shader", true, true},
1310    {"tessellation evaluation shader", true, false},
1311    {"geometry shader", true, false},
1312    {"fragment shader", false, false},
1313};
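
/* Note: the arrayed_input/arrayed_output flags above mirror SPIR-V execution model rules:
 * tessellation control shaders see both their per-vertex inputs and outputs as arrays,
 * while tessellation evaluation and geometry shaders see only their inputs that way. */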
1314
1315static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1316    while (true) {
1317
1318        if (def.opcode() == spv::OpTypePointer) {
1319            def = src->get_def(def.word(3));
1320        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1321            def = src->get_def(def.word(2));
1322            is_array_of_verts = false;
1323        } else if (def.opcode() == spv::OpTypeStruct) {
1324            return def;
1325        } else {
1326            return src->end();
1327        }
1328    }
1329}
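
/* e.g. an arrayed output block declared through
 *   OpTypePointer Output -> OpTypeArray -> OpTypeStruct
 * resolves to the OpTypeStruct when is_array_of_verts is true; a chain that never
 * reaches a struct yields src->end(), which callers treat as "not a block". */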
1330
1331static void collect_interface_block_members(shader_module const *src,
1332                                            std::map<location_t, interface_var> &out,
1333                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1334                                            uint32_t id, uint32_t type_id, bool is_patch) {
1335    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1336    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1337    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1338        /* this isn't an interface block. */
1339        return;
1340    }
1341
1342    std::unordered_map<unsigned, unsigned> member_components;
1343
1344    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1345    for (auto insn : *src) {
1346        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1347            unsigned member_index = insn.word(2);
1348
1349            if (insn.word(3) == spv::DecorationComponent) {
1350                unsigned component = insn.word(4);
1351                member_components[member_index] = component;
1352            }
1353        }
1354    }
1355
1356    /* Second pass -- produce the output, from Location decorations */
1357    for (auto insn : *src) {
1358        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1359            unsigned member_index = insn.word(2);
1360            unsigned member_type_id = type.word(2 + member_index);
1361
1362            if (insn.word(3) == spv::DecorationLocation) {
1363                unsigned location = insn.word(4);
1364                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1365                auto component_it = member_components.find(member_index);
1366                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1367
1368                for (unsigned int offset = 0; offset < num_locations; offset++) {
1369                    interface_var v;
1370                    v.id = id;
1371                    /* TODO: member index in interface_var too? */
1372                    v.type_id = member_type_id;
1373                    v.offset = offset;
1374                    v.is_patch = is_patch;
1375                    v.is_block_member = true;
1376                    out[std::make_pair(location + offset, component)] = v;
1377                }
1378            }
1379        }
1380    }
1381}
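
/* Illustrative example (hypothetical GLSL): a block member declared as
 *   layout(location = 2) dvec4 m;
 * consumes two locations, so the loop above emits two interface_var entries keyed
 * (2, component) and (3, component), both with is_block_member set. */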
1382
1383static void collect_interface_by_location(shader_module const *src, spirv_inst_iter entrypoint,
1384                                          spv::StorageClass sinterface, std::map<location_t, interface_var> &out,
1385                                          bool is_array_of_verts) {
1386    std::unordered_map<unsigned, unsigned> var_locations;
1387    std::unordered_map<unsigned, unsigned> var_builtins;
1388    std::unordered_map<unsigned, unsigned> var_components;
1389    std::unordered_map<unsigned, unsigned> blocks;
1390    std::unordered_map<unsigned, unsigned> var_patch;
1391
1392    for (auto insn : *src) {
1393
1394        /* We consider two interface models: SSO (separate shader object) rendezvous-
1395         * by-location, and builtins. Complain about anything that fits neither model.
1396         */
1397        if (insn.opcode() == spv::OpDecorate) {
1398            if (insn.word(2) == spv::DecorationLocation) {
1399                var_locations[insn.word(1)] = insn.word(3);
1400            }
1401
1402            if (insn.word(2) == spv::DecorationBuiltIn) {
1403                var_builtins[insn.word(1)] = insn.word(3);
1404            }
1405
1406            if (insn.word(2) == spv::DecorationComponent) {
1407                var_components[insn.word(1)] = insn.word(3);
1408            }
1409
1410            if (insn.word(2) == spv::DecorationBlock) {
1411                blocks[insn.word(1)] = 1;
1412            }
1413
1414            if (insn.word(2) == spv::DecorationPatch) {
1415                var_patch[insn.word(1)] = 1;
1416            }
1417        }
1418    }
1419
1420    /* TODO: handle grouped decorations */
1421    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1422     * have the same location, and we DON'T want to clobber. */
1423
1424    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1425       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1426       the word to determine which word contains the terminator. */
1427    uint32_t word = 3;
1428    while (entrypoint.word(word) & 0xff000000u) {
1429        ++word;
1430    }
1431    ++word;
1432
1433    for (; word < entrypoint.len(); word++) {
1434        auto insn = src->get_def(entrypoint.word(word));
1435        assert(insn != src->end());
1436        assert(insn.opcode() == spv::OpVariable);
1437
1438        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1439            unsigned id = insn.word(2);
1440            unsigned type = insn.word(1);
1441
1442            int location = value_or_default(var_locations, id, -1);
1443            int builtin = value_or_default(var_builtins, id, -1);
1444            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK, is 0 */
1445            bool is_patch = var_patch.find(id) != var_patch.end();
1446
1447            /* All variables and interface block members in the Input or Output storage classes
1448             * must be decorated with either a builtin or an explicit location.
1449             *
1450             * TODO: integrate the interface block support here. For now, don't complain --
1451             * a valid SPIRV module will only hit this path for the interface block case, as the
1452             * individual members of the type are decorated, rather than variable declarations.
1453             */
1454
1455            if (location != -1) {
1456                /* A user-defined interface variable, with a location. Where a variable
1457                 * occupied multiple locations, emit one result for each. */
1458                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1459                for (unsigned int offset = 0; offset < num_locations; offset++) {
1460                    interface_var v;
1461                    v.id = id;
1462                    v.type_id = type;
1463                    v.offset = offset;
1464                    v.is_patch = is_patch;
1465                    v.is_block_member = false;
1466                    out[std::make_pair(location + offset, component)] = v;
1467                }
1468            } else if (builtin == -1) {
1469                /* An interface block instance */
1470                collect_interface_block_members(src, out, blocks, is_array_of_verts, id, type, is_patch);
1471            }
1472        }
1473    }
1474}
1475
1476static void collect_interface_by_descriptor_slot(debug_report_data *report_data, shader_module const *src,
1477                                                 std::unordered_set<uint32_t> const &accessible_ids,
1478                                                 std::map<descriptor_slot_t, interface_var> &out) {
1479
1480    std::unordered_map<unsigned, unsigned> var_sets;
1481    std::unordered_map<unsigned, unsigned> var_bindings;
1482
1483    for (auto insn : *src) {
1484        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1485         * DecorationDescriptorSet and DecorationBinding.
1486         */
1487        if (insn.opcode() == spv::OpDecorate) {
1488            if (insn.word(2) == spv::DecorationDescriptorSet) {
1489                var_sets[insn.word(1)] = insn.word(3);
1490            }
1491
1492            if (insn.word(2) == spv::DecorationBinding) {
1493                var_bindings[insn.word(1)] = insn.word(3);
1494            }
1495        }
1496    }
1497
1498    for (auto id : accessible_ids) {
1499        auto insn = src->get_def(id);
1500        assert(insn != src->end());
1501
1502        if (insn.opcode() == spv::OpVariable &&
1503            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1504            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1505            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1506
1507            auto existing_it = out.find(std::make_pair(set, binding));
1508            if (existing_it != out.end()) {
1509                /* conflict within the spv module */
1510                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1511                        __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
1512                        "var %d (type %d) in %s interface in descriptor slot (%u,%u) conflicts with existing definition",
1513                        insn.word(2), insn.word(1), storage_class_name(insn.word(3)), existing_it->first.first,
1514                        existing_it->first.second);
1515            }
1516
1517            interface_var v;
1518            v.id = insn.word(2);
1519            v.type_id = insn.word(1);
1520            v.offset = 0;
1521            v.is_patch = false;
1522            v.is_block_member = false;
1523            out[std::make_pair(set, binding)] = v;
1524        }
1525    }
1526}
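
/* e.g. a variable declared in GLSL as
 *   layout(set = 1, binding = 3) uniform sampler2D s;
 * lands in 'out' under the descriptor_slot_t key (1, 3); a second variable decorated
 * with the same set/binding pair would hit the SHADER_CHECKER_INCONSISTENT_SPIRV path. */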
1527
1528static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1529                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1530                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1531                                              shader_stage_attributes const *consumer_stage) {
1532    std::map<location_t, interface_var> outputs;
1533    std::map<location_t, interface_var> inputs;
1534
1535    bool pass = true;
1536
1537    collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, outputs, producer_stage->arrayed_output);
1538    collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, inputs, consumer_stage->arrayed_input);
1539
1540    auto a_it = outputs.begin();
1541    auto b_it = inputs.begin();
1542
1543    /* maps sorted by key (location); walk them together to find mismatches */
1544    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1545        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1546        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1547        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1548        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1549
1550        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1551            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1552                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1553                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1554                        a_first.second, consumer_stage->name)) {
1555                pass = false;
1556            }
1557            a_it++;
1558        } else if (a_at_end || a_first > b_first) {
1559            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1560                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1561                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1562                        producer_stage->name)) {
1563                pass = false;
1564            }
1565            b_it++;
1566        } else {
1567            // subtleties of arrayed interfaces:
1568            // - if is_patch, then the member is not arrayed, even though the interface may be.
1569            // - if is_block_member, then the extra array level of an arrayed interface is not
1570            //   expressed in the member type -- it's expressed in the block type.
1571            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1572                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1573                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1574                             true)) {
1575                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1576                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1577                            a_first.first, a_first.second,
1578                            describe_type(producer, a_it->second.type_id).c_str(),
1579                            describe_type(consumer, b_it->second.type_id).c_str())) {
1580                    pass = false;
1581                }
1582            }
1583            if (a_it->second.is_patch != b_it->second.is_patch) {
1584                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1585                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1586                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1587                            "per-%s in %s stage", a_first.first, a_first.second,
1588                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1589                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1590                    pass = false;
1591                }
1592            }
1593            a_it++;
1594            b_it++;
1595        }
1596    }
1597
1598    return pass;
1599}
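
/* Usage sketch: for a VS/FS pair, a VS output at a location the FS never reads only
 * triggers the performance warning above, while an FS input the VS never writes is a
 * hard SHADER_CHECKER_INPUT_NOT_PRODUCED error, since the FS would read undefined values. */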
1600
1601enum FORMAT_TYPE {
1602    FORMAT_TYPE_UNDEFINED,
1603    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1604    FORMAT_TYPE_SINT,
1605    FORMAT_TYPE_UINT,
1606};
1607
1608static unsigned get_format_type(VkFormat fmt) {
1609    switch (fmt) {
1610    case VK_FORMAT_UNDEFINED:
1611        return FORMAT_TYPE_UNDEFINED;
1612    case VK_FORMAT_R8_SINT:
1613    case VK_FORMAT_R8G8_SINT:
1614    case VK_FORMAT_R8G8B8_SINT:
1615    case VK_FORMAT_R8G8B8A8_SINT:
1616    case VK_FORMAT_R16_SINT:
1617    case VK_FORMAT_R16G16_SINT:
1618    case VK_FORMAT_R16G16B16_SINT:
1619    case VK_FORMAT_R16G16B16A16_SINT:
1620    case VK_FORMAT_R32_SINT:
1621    case VK_FORMAT_R32G32_SINT:
1622    case VK_FORMAT_R32G32B32_SINT:
1623    case VK_FORMAT_R32G32B32A32_SINT:
1624    case VK_FORMAT_R64_SINT:
1625    case VK_FORMAT_R64G64_SINT:
1626    case VK_FORMAT_R64G64B64_SINT:
1627    case VK_FORMAT_R64G64B64A64_SINT:
1628    case VK_FORMAT_B8G8R8_SINT:
1629    case VK_FORMAT_B8G8R8A8_SINT:
1630    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1631    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1632    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1633        return FORMAT_TYPE_SINT;
1634    case VK_FORMAT_R8_UINT:
1635    case VK_FORMAT_R8G8_UINT:
1636    case VK_FORMAT_R8G8B8_UINT:
1637    case VK_FORMAT_R8G8B8A8_UINT:
1638    case VK_FORMAT_R16_UINT:
1639    case VK_FORMAT_R16G16_UINT:
1640    case VK_FORMAT_R16G16B16_UINT:
1641    case VK_FORMAT_R16G16B16A16_UINT:
1642    case VK_FORMAT_R32_UINT:
1643    case VK_FORMAT_R32G32_UINT:
1644    case VK_FORMAT_R32G32B32_UINT:
1645    case VK_FORMAT_R32G32B32A32_UINT:
1646    case VK_FORMAT_R64_UINT:
1647    case VK_FORMAT_R64G64_UINT:
1648    case VK_FORMAT_R64G64B64_UINT:
1649    case VK_FORMAT_R64G64B64A64_UINT:
1650    case VK_FORMAT_B8G8R8_UINT:
1651    case VK_FORMAT_B8G8R8A8_UINT:
1652    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1653    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1654    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1655        return FORMAT_TYPE_UINT;
1656    default:
1657        return FORMAT_TYPE_FLOAT;
1658    }
1659}
1660
1661/* characterizes a SPIR-V type appearing in an interface to a fixed-function stage,
1662 * for comparison with a VkFormat's characterization above. */
1663static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1664    auto insn = src->get_def(type);
1665    assert(insn != src->end());
1666
1667    switch (insn.opcode()) {
1668    case spv::OpTypeInt:
1669        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1670    case spv::OpTypeFloat:
1671        return FORMAT_TYPE_FLOAT;
1672    case spv::OpTypeVector:
1673        return get_fundamental_type(src, insn.word(2));
1674    case spv::OpTypeMatrix:
1675        return get_fundamental_type(src, insn.word(2));
1676    case spv::OpTypeArray:
1677        return get_fundamental_type(src, insn.word(2));
1678    case spv::OpTypePointer:
1679        return get_fundamental_type(src, insn.word(3));
1680    default:
1681        return FORMAT_TYPE_UNDEFINED;
1682    }
1683}
1684
1685static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1686    uint32_t bit_pos = u_ffs(stage);
1687    return bit_pos - 1;
1688}
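
/* e.g. VK_SHADER_STAGE_VERTEX_BIT (0x1) maps to stage id 0 and
 * VK_SHADER_STAGE_FRAGMENT_BIT (0x10) to stage id 4, suitable for indexing
 * per-stage arrays like shader_stage_attribs. */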
1689
1690static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1691    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1692     * each binding should be specified only once.
1693     */
1694    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1695    bool pass = true;
1696
1697    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1698        auto desc = &vi->pVertexBindingDescriptions[i];
1699        auto &binding = bindings[desc->binding];
1700        if (binding) {
1701            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1702                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1703                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1704                pass = false;
1705            }
1706        } else {
1707            binding = desc;
1708        }
1709    }
1710
1711    return pass;
1712}
1713
1714static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1715                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1716    std::map<location_t, interface_var> inputs;
1717    bool pass = true;
1718
1719    collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, inputs, false);
1720
1721    /* Build index by location */
1722    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1723    if (vi) {
1724        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1725            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1726            for (auto j = 0u; j < num_locations; j++) {
1727                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1728            }
1729        }
1730    }
1731
1732    auto it_a = attribs.begin();
1733    auto it_b = inputs.begin();
1734
1735    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1736        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1737        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1738        auto a_first = a_at_end ? 0 : it_a->first;
1739        auto b_first = b_at_end ? 0 : it_b->first.first;
1740        if (!a_at_end && (b_at_end || a_first < b_first)) {
1741            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1742                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1743                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1744                pass = false;
1745            }
1746            it_a++;
1747        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1748            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1749                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1750                        b_first)) {
1751                pass = false;
1752            }
1753            it_b++;
1754        } else {
1755            unsigned attrib_type = get_format_type(it_a->second->format);
1756            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1757
1758            /* type checking */
1759            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1760                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1761                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1762                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1763                            string_VkFormat(it_a->second->format), a_first,
1764                            describe_type(vs, it_b->second.type_id).c_str())) {
1765                    pass = false;
1766                }
1767            }
1768
1769            /* OK! */
1770            it_a++;
1771            it_b++;
1772        }
1773    }
1774
1775    return pass;
1776}
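
/* Illustrative mismatch (hypothetical pipeline): an attribute of
 * VK_FORMAT_R32G32B32A32_SFLOAT feeding a VS input declared as ivec4 compares
 * FORMAT_TYPE_FLOAT against FORMAT_TYPE_SINT and reports
 * SHADER_CHECKER_INTERFACE_TYPE_MISMATCH. */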
1777
1778static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1779                                                    spirv_inst_iter entrypoint, RENDER_PASS_NODE const *rp, uint32_t subpass) {
1780    std::map<location_t, interface_var> outputs;
1781    std::map<uint32_t, VkFormat> color_attachments;
1782    for (auto i = 0u; i < rp->subpassColorFormats[subpass].size(); i++) {
1783        if (rp->subpassColorFormats[subpass][i] != VK_FORMAT_UNDEFINED) {
1784            color_attachments[i] = rp->subpassColorFormats[subpass][i];
1785        }
1786    }
1787
1788    bool pass = true;
1789
1790    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */
1791
1792    collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, outputs, false);
1793
1794    auto it_a = outputs.begin();
1795    auto it_b = color_attachments.begin();
1796
1797    /* Walk attachment list and outputs together */
1798
1799    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1800        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1801        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1802
1803        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1804            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1805                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1806                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1807                pass = false;
1808            }
1809            it_a++;
1810        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1811            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1812                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
1813                pass = false;
1814            }
1815            it_b++;
1816        } else {
1817            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
1818            unsigned att_type = get_format_type(it_b->second);
1819
1820            /* type checking */
1821            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
1822                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1823                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1824                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
1825                            string_VkFormat(it_b->second),
1826                            describe_type(fs, it_a->second.type_id).c_str())) {
1827                    pass = false;
1828                }
1829            }
1830
1831            /* OK! */
1832            it_a++;
1833            it_b++;
1834        }
1835    }
1836
1837    return pass;
1838}
1839
1840/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
1841 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
1842 * for example.
1843 * Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
1844 *  - NOT the shader input/output interfaces.
1845 *
1846 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
1847 * converting parts of this to be generated from the machine-readable spec instead.
1848 */
1849static void mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint, std::unordered_set<uint32_t> &ids) {
1850    std::unordered_set<uint32_t> worklist;
1851    worklist.insert(entrypoint.word(2));
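    /* word(2) of OpEntryPoint is the <id> of the entry point's function; seeding the
     * worklist with it lets the loop below pull in the whole static call tree via the
     * OpFunctionCall case. */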
1852
1853    while (!worklist.empty()) {
1854        auto id_iter = worklist.begin();
1855        auto id = *id_iter;
1856        worklist.erase(id_iter);
1857
1858        auto insn = src->get_def(id);
1859        if (insn == src->end()) {
1860            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
1861             * across all kinds of things here that we may not care about. */
1862            continue;
1863        }
1864
1865        /* try to add to the output set */
1866        if (!ids.insert(id).second) {
1867            continue; /* if we already saw this id, we don't want to walk it again. */
1868        }
1869
1870        switch (insn.opcode()) {
1871        case spv::OpFunction:
1872            /* scan whole body of the function, enlisting anything interesting */
1873            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
1874                switch (insn.opcode()) {
1875                case spv::OpLoad:
1876                case spv::OpAtomicLoad:
1877                case spv::OpAtomicExchange:
1878                case spv::OpAtomicCompareExchange:
1879                case spv::OpAtomicCompareExchangeWeak:
1880                case spv::OpAtomicIIncrement:
1881                case spv::OpAtomicIDecrement:
1882                case spv::OpAtomicIAdd:
1883                case spv::OpAtomicISub:
1884                case spv::OpAtomicSMin:
1885                case spv::OpAtomicUMin:
1886                case spv::OpAtomicSMax:
1887                case spv::OpAtomicUMax:
1888                case spv::OpAtomicAnd:
1889                case spv::OpAtomicOr:
1890                case spv::OpAtomicXor:
1891                    worklist.insert(insn.word(3)); /* ptr */
1892                    break;
1893                case spv::OpStore:
1894                case spv::OpAtomicStore:
1895                    worklist.insert(insn.word(1)); /* ptr */
1896                    break;
1897                case spv::OpAccessChain:
1898                case spv::OpInBoundsAccessChain:
1899                    worklist.insert(insn.word(3)); /* base ptr */
1900                    break;
1901                case spv::OpSampledImage:
1902                case spv::OpImageSampleImplicitLod:
1903                case spv::OpImageSampleExplicitLod:
1904                case spv::OpImageSampleDrefImplicitLod:
1905                case spv::OpImageSampleDrefExplicitLod:
1906                case spv::OpImageSampleProjImplicitLod:
1907                case spv::OpImageSampleProjExplicitLod:
1908                case spv::OpImageSampleProjDrefImplicitLod:
1909                case spv::OpImageSampleProjDrefExplicitLod:
1910                case spv::OpImageFetch:
1911                case spv::OpImageGather:
1912                case spv::OpImageDrefGather:
1913                case spv::OpImageRead:
1914                case spv::OpImage:
1915                case spv::OpImageQueryFormat:
1916                case spv::OpImageQueryOrder:
1917                case spv::OpImageQuerySizeLod:
1918                case spv::OpImageQuerySize:
1919                case spv::OpImageQueryLod:
1920                case spv::OpImageQueryLevels:
1921                case spv::OpImageQuerySamples:
1922                case spv::OpImageSparseSampleImplicitLod:
1923                case spv::OpImageSparseSampleExplicitLod:
1924                case spv::OpImageSparseSampleDrefImplicitLod:
1925                case spv::OpImageSparseSampleDrefExplicitLod:
1926                case spv::OpImageSparseSampleProjImplicitLod:
1927                case spv::OpImageSparseSampleProjExplicitLod:
1928                case spv::OpImageSparseSampleProjDrefImplicitLod:
1929                case spv::OpImageSparseSampleProjDrefExplicitLod:
1930                case spv::OpImageSparseFetch:
1931                case spv::OpImageSparseGather:
1932                case spv::OpImageSparseDrefGather:
1933                case spv::OpImageTexelPointer:
1934                    worklist.insert(insn.word(3)); /* image or sampled image */
1935                    break;
1936                case spv::OpImageWrite:
1937                    worklist.insert(insn.word(1)); /* image -- different operand order to above */
1938                    break;
1939                case spv::OpFunctionCall:
1940                    for (uint32_t i = 3; i < insn.len(); i++) {
1941                        worklist.insert(insn.word(i)); /* fn itself, and all args */
1942                    }
1943                    break;
1944
1945                case spv::OpExtInst:
1946                    for (uint32_t i = 5; i < insn.len(); i++) {
1947                        worklist.insert(insn.word(i)); /* operands to ext inst */
1948                    }
1949                    break;
1950                }
1951            }
1952            break;
1953        }
1954    }
1955}
1956
1957static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
1958                                                          std::vector<VkPushConstantRange> const *pushConstantRanges,
1959                                                          shader_module const *src, spirv_inst_iter type,
1960                                                          VkShaderStageFlagBits stage) {
1961    bool pass = true;
1962
1963    /* strip off ptrs etc */
1964    type = get_struct_type(src, type, false);
1965    assert(type != src->end());
1966
1967    /* validate directly off the offsets. this isn't quite correct for arrays
1968     * and matrices, but is a good first step. TODO: arrays, matrices, weird
1969     * sizes */
1970    for (auto insn : *src) {
1971        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1972
1973            if (insn.word(3) == spv::DecorationOffset) {
1974                unsigned offset = insn.word(4);
1975                auto size = 4; /* bytes; TODO: calculate this based on the type */
1976
1977                bool found_range = false;
1978                for (auto const &range : *pushConstantRanges) {
1979                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
1980                        found_range = true;
1981
1982                        if ((range.stageFlags & stage) == 0) {
1983                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1984                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
1985                                        "Push constant range covering variable starting at "
1986                                        "offset %u not accessible from stage %s",
1987                                        offset, string_VkShaderStageFlagBits(stage))) {
1988                                pass = false;
1989                            }
1990                        }
1991
1992                        break;
1993                    }
1994                }
1995
1996                if (!found_range) {
1997                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1998                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
1999                                "Push constant range covering variable starting at "
2000                                "offset %u not declared in layout",
2001                                offset)) {
2002                        pass = false;
2003                    }
2004                }
2005            }
2006        }
2007    }
2008
2009    return pass;
2010}
2011
2012static bool validate_push_constant_usage(debug_report_data *report_data,
2013                                         std::vector<VkPushConstantRange> const *pushConstantRanges, shader_module const *src,
2014                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2015    bool pass = true;
2016
2017    for (auto id : accessible_ids) {
2018        auto def_insn = src->get_def(id);
2019        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2020            pass &= validate_push_constant_block_against_pipeline(report_data, pushConstantRanges, src,
2021                                                                 src->get_def(def_insn.word(1)), stage);
2022        }
2023    }
2024
2025    return pass;
2026}
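
/* Usage sketch (hypothetical layout): a push constant member decorated with Offset 16
 * passes only if some VkPushConstantRange satisfies offset <= 16 and
 * offset + size >= 20 (given the 4-byte size assumption above), and that range's
 * stageFlags include the stage being validated. */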
2027
2028// For given pipelineLayout verify that the set_layout_node at slot.first
2029//  has the requested binding at slot.second and return ptr to that binding
2030static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2031
2032    if (!pipelineLayout)
2033        return nullptr;
2034
2035    if (slot.first >= pipelineLayout->descriptorSetLayouts.size())
2036        return nullptr;
2037
2038    return pipelineLayout->setLayouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2039}
2040
2041// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2042
2043static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2044
2045// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2046//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2047//   to that same cmd buffer by separate thread are not changing state from underneath us
2048// Track the last cmd buffer touched by this thread
2049
2050static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2051    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2052        if (pCB->drawCount[i])
2053            return true;
2054    }
2055    return false;
2056}
2057
2058// Check object status for selected flag state
2059static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2060                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2061    if (!(pNode->status & status_mask)) {
2062        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2063                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2064                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2065    }
2066    return false;
2067}
2068
2069// Retrieve pipeline node ptr for given pipeline object
2070static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2071    auto it = my_data->pipelineMap.find(pipeline);
2072    if (it == my_data->pipelineMap.end()) {
2073        return nullptr;
2074    }
2075    return it->second;
2076}
2077
2078static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2079    auto it = my_data->renderPassMap.find(renderpass);
2080    if (it == my_data->renderPassMap.end()) {
2081        return nullptr;
2082    }
2083    return it->second;
2084}
2085
2086static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
2087    auto it = my_data->frameBufferMap.find(framebuffer);
2088    if (it == my_data->frameBufferMap.end()) {
2089        return nullptr;
2090    }
2091    return it->second.get();
2092}
2093
2094cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2095    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2096    if (it == my_data->descriptorSetLayoutMap.end()) {
2097        return nullptr;
2098    }
2099    return it->second;
2100}
2101
2102static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2103    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2104    if (it == my_data->pipelineLayoutMap.end()) {
2105        return nullptr;
2106    }
2107    return &it->second;
2108}
2109
2110// Return true if for a given PSO, the given state enum is dynamic, else return false
2111static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2112    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2113        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2114            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2115                return true;
2116        }
2117    }
2118    return false;
2119}
2120
2121// Validate state stored as flags at time of draw call
2122static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2123    bool result;
2124    result = validate_status(dev_data, pCB, CBSTATUS_VIEWPORT_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_VIEWPORT_NOT_BOUND,
2125                             "Dynamic viewport state not set for this command buffer");
2126    result |= validate_status(dev_data, pCB, CBSTATUS_SCISSOR_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT, DRAWSTATE_SCISSOR_NOT_BOUND,
2127                              "Dynamic scissor state not set for this command buffer");
2128    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2129        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2130         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2131        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2132                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2133    }
2134    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2135        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2136        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2137                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2138    }
2139    if (pPipe->blendConstantsEnabled) {
2140        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2141                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2142    }
2143    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2144        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2145        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2146                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2147    }
2148    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2149        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2150        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2151                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2152        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2153                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2154        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2155                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2156    }
2157    if (indexedDraw) {
2158        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2159                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2160                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2161    }
2162    return result;
2163}
2164
2165// Verify attachment reference compatibility according to spec
2166//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED & the other array must match this
2167//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2168//   to make sure that format and sample counts match.
2169//  If not, they are not compatible.
2170static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2171                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2172                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2173                                             const VkAttachmentDescription *pSecondaryAttachments) {
2174    // Check potential NULL cases first to avoid nullptr issues later
2175    if (pPrimary == nullptr) {
2176        if (pSecondary == nullptr) {
2177            return true;
2178        }
2179        return false;
2180    } else if (pSecondary == nullptr) {
2181        return false;
2182    }
2183    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2184        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2185            return true;
2186    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2187        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2188            return true;
2189    } else { // Format and sample count must match
2190        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2191            return true;
2192        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2193            return false;
2194        }
2195        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2196             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2197            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2198             pSecondaryAttachments[pSecondary[index].attachment].samples))
2199            return true;
2200    }
2201    // Format and sample counts didn't match
2202    return false;
2203}
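
/* e.g. with primaryCount == 2 and secondaryCount == 1, index 1 compares the primary
 * reference against an implicit VK_ATTACHMENT_UNUSED: compatible only if
 * pPrimary[1].attachment is also VK_ATTACHMENT_UNUSED. */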
2204
2205// For given primary and secondary RenderPass objects, verify that they're compatible
2206static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPass primaryRP, const VkRenderPass secondaryRP,
2207                                            string &errorMsg) {
2208    auto primary_render_pass = getRenderPass(my_data, primaryRP);
2209    auto secondary_render_pass = getRenderPass(my_data, secondaryRP);
2210
2211    if (!primary_render_pass) {
2212        stringstream errorStr;
2213        errorStr << "invalid VkRenderPass (" << primaryRP << ")";
2214        errorMsg = errorStr.str();
2215        return false;
2216    }
2217
2218    if (!secondary_render_pass) {
2219        stringstream errorStr;
2220        errorStr << "invalid VkRenderPass (" << secondaryRP << ")";
2221        errorMsg = errorStr.str();
2222        return false;
2223    }
2224    // Trivial pass case is exact same RP
2225    if (primaryRP == secondaryRP) {
2226        return true;
2227    }
2228    const VkRenderPassCreateInfo *primaryRPCI = primary_render_pass->pCreateInfo;
2229    const VkRenderPassCreateInfo *secondaryRPCI = secondary_render_pass->pCreateInfo;
2230    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2231        stringstream errorStr;
2232        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2233                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2234        errorMsg = errorStr.str();
2235        return false;
2236    }
2237    uint32_t spIndex = 0;
2238    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2239        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2240        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2241        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2242        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2243        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2244            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2245                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2246                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2247                stringstream errorStr;
2248                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2249                errorMsg = errorStr.str();
2250                return false;
2251            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2252                                                         primaryColorCount, primaryRPCI->pAttachments,
2253                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2254                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2255                stringstream errorStr;
2256                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2257                errorMsg = errorStr.str();
2258                return false;
2259            }
2260        }
2261
2262        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2263                                              1, primaryRPCI->pAttachments,
2264                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2265                                              1, secondaryRPCI->pAttachments)) {
2266            stringstream errorStr;
2267            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2268            errorMsg = errorStr.str();
2269            return false;
2270        }
2271
2272        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2273        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2274        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2275        for (uint32_t i = 0; i < inputMax; ++i) {
2276            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2277                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2278                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2279                stringstream errorStr;
2280                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2281                errorMsg = errorStr.str();
2282                return false;
2283            }
2284        }
2285    }
2286    return true;
2287}
2288
2289// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2290// pipelineLayout[layoutIndex]
2291static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2292                                            const VkPipelineLayout layout, const uint32_t layoutIndex, string &errorMsg) {
2293    auto pipeline_layout = getPipelineLayout(my_data, layout);
2294    if (!pipeline_layout) {
2295        stringstream errorStr;
2296        errorStr << "invalid VkPipelineLayout (" << layout << ")";
2297        errorMsg = errorStr.str();
2298        return false;
2299    }
2300    if (layoutIndex >= pipeline_layout->descriptorSetLayouts.size()) {
2301        stringstream errorStr;
2302        errorStr << "VkPipelineLayout (" << layout << ") only contains " << pipeline_layout->descriptorSetLayouts.size()
2303                 << " setLayouts corresponding to sets 0-" << pipeline_layout->descriptorSetLayouts.size() - 1
2304                 << ", but you're attempting to bind set to index " << layoutIndex;
2305        errorMsg = errorStr.str();
2306        return false;
2307    }
2308    auto layout_node = pipeline_layout->setLayouts[layoutIndex];
2309    return pSet->IsCompatible(layout_node, &errorMsg);
2310}
2311
2312// Validate that data for each specialization entry is fully contained within the buffer.
2313static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2314    bool pass = true;
2315
2316    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2317
2318    if (spec) {
2319        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2320            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2321                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2322                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2323                            "Specialization entry %u (for constant id %u) references memory outside provided "
2324                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2325                            " bytes provided)",
2326                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2327                            (size_t)(spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1), spec->dataSize)) {
2328
2329                    pass = false;
2330                }
2331            }
2332        }
2333    }
2334
2335    return pass;
2336}
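
/* Worked example: with dataSize == 8, a map entry with offset == 4 and size == 8
 * fails the check above (4 + 8 > 8) and is reported as referencing bytes 4..11 of
 * the 8 bytes provided. */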
2337
2338static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2339                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2340    auto type = module->get_def(type_id);
2341
2342    descriptor_count = 1;
2343
2344    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2345     * descriptor count for each dimension. */
2346    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2347        if (type.opcode() == spv::OpTypeArray) {
2348            descriptor_count *= get_constant_value(module, type.word(3));
2349            type = module->get_def(type.word(2));
2350        }
2351        else {
2352            type = module->get_def(type.word(3));
2353        }
2354    }
2355
2356    switch (type.opcode()) {
2357    case spv::OpTypeStruct: {
2358        for (auto insn : *module) {
2359            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2360                if (insn.word(2) == spv::DecorationBlock) {
2361                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2362                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2363                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2364                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2365                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2366                }
2367            }
2368        }
2369
2370        /* Invalid */
2371        return false;
2372    }
2373
2374    case spv::OpTypeSampler:
2375        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER;
2376
2377    case spv::OpTypeSampledImage:
2378        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2379            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2380             * doesn't really have a sampler, and a texel buffer descriptor
2381             * doesn't really provide one. Allow this slight mismatch.
2382             */
2383            auto image_type = module->get_def(type.word(2));
2384            auto dim = image_type.word(3);
2385            auto sampled = image_type.word(7);
2386            return dim == spv::DimBuffer && sampled == 1;
2387        }
2388        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2389
2390    case spv::OpTypeImage: {
2391        /* Many descriptor types backing image types-- depends on dimension
2392         * and whether the image will be used with a sampler. SPIRV for
2393         * Vulkan requires that sampled be 1 or 2 -- leaving the decision to
2394         * runtime is unacceptable.
2395         */
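        /* OpTypeImage operand layout: word(3) is the Dim operand and word(7) is the
         * Sampled operand (1 = sampled image, 2 = storage image). */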
2396        auto dim = type.word(3);
2397        auto sampled = type.word(7);
2398
2399        if (dim == spv::DimSubpassData) {
2400            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2401        } else if (dim == spv::DimBuffer) {
2402            if (sampled == 1) {
2403                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2404            } else {
2405                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2406            }
2407        } else if (sampled == 1) {
2408            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
2409        } else {
2410            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2411        }
2412    }
2413
2414    /* We shouldn't really see any other junk types -- but if we do, they're
2415     * a mismatch.
2416     */
2417    default:
2418        return false; /* Mismatch */
2419    }
2420}
2421
2422static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2423    if (!feature) {
2424        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2425                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2426                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
2427                    "enabled on the device",
2428                    feature_name)) {
2429            return false;
2430        }
2431    }
2432
2433    return true;
2434}
2435
2436static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2437                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2438    bool pass = true;
2439
2441    for (auto insn : *src) {
2442        if (insn.opcode() == spv::OpCapability) {
2443            switch (insn.word(1)) {
2444            case spv::CapabilityMatrix:
2445            case spv::CapabilityShader:
2446            case spv::CapabilityInputAttachment:
2447            case spv::CapabilitySampled1D:
2448            case spv::CapabilityImage1D:
2449            case spv::CapabilitySampledBuffer:
2450            case spv::CapabilityImageBuffer:
2451            case spv::CapabilityImageQuery:
2452            case spv::CapabilityDerivativeControl:
2453                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2454                break;
2455
2456            case spv::CapabilityGeometry:
2457                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2458                break;
2459
2460            case spv::CapabilityTessellation:
2461                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2462                break;
2463
2464            case spv::CapabilityFloat64:
2465                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2466                break;
2467
2468            case spv::CapabilityInt64:
2469                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2470                break;
2471
2472            case spv::CapabilityTessellationPointSize:
2473            case spv::CapabilityGeometryPointSize:
2474                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2475                                        "shaderTessellationAndGeometryPointSize");
2476                break;
2477
2478            case spv::CapabilityImageGatherExtended:
2479                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2480                break;
2481
2482            case spv::CapabilityStorageImageMultisample:
2483                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2484                break;
2485
2486            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2487                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2488                                        "shaderUniformBufferArrayDynamicIndexing");
2489                break;
2490
2491            case spv::CapabilitySampledImageArrayDynamicIndexing:
2492                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2493                                        "shaderSampledImageArrayDynamicIndexing");
2494                break;
2495
2496            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2497                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2498                                        "shaderStorageBufferArrayDynamicIndexing");
2499                break;
2500
2501            case spv::CapabilityStorageImageArrayDynamicIndexing:
2502                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2503                                        "shaderStorageImageArrayDynamicIndexing");
2504                break;
2505
2506            case spv::CapabilityClipDistance:
2507                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2508                break;
2509
2510            case spv::CapabilityCullDistance:
2511                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2512                break;
2513
2514            case spv::CapabilityImageCubeArray:
2515                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2516                break;
2517
2518            case spv::CapabilitySampleRateShading:
2519                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2520                break;
2521
2522            case spv::CapabilitySparseResidency:
2523                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2524                break;
2525
2526            case spv::CapabilityMinLod:
2527                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2528                break;
2529
2530            case spv::CapabilitySampledCubeArray:
2531                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2532                break;
2533
2534            case spv::CapabilityImageMSArray:
2535                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2536                break;
2537
2538            case spv::CapabilityStorageImageExtendedFormats:
2539                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2540                                        "shaderStorageImageExtendedFormats");
2541                break;
2542
2543            case spv::CapabilityInterpolationFunction:
2544                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2545                break;
2546
2547            case spv::CapabilityStorageImageReadWithoutFormat:
2548                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2549                                        "shaderStorageImageReadWithoutFormat");
2550                break;
2551
2552            case spv::CapabilityStorageImageWriteWithoutFormat:
2553                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2554                                        "shaderStorageImageWriteWithoutFormat");
2555                break;
2556
2557            case spv::CapabilityMultiViewport:
2558                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2559                break;
2560
2561            default:
2562                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2563                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2564                            "Shader declares capability %u, which is not supported in Vulkan.",
2565                            insn.word(1)))
2566                    pass = false;
2567                break;
2568            }
2569        }
2570    }
2571
2572    return pass;
2573}
2574
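// Validate a single pipeline shader stage: check specialization offsets, locate the
// entrypoint, check declared capabilities against enabled device features, then verify
// push-constant and descriptor usage against the pipeline layout. Also records the
// descriptor slots the stage actually uses into pipeline->active_slots.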
2575static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2576                                           VkPipelineShaderStageCreateInfo const *pStage,
2577                                           PIPELINE_NODE *pipeline,
2578                                           shader_module **out_module,
2579                                           spirv_inst_iter *out_entrypoint,
2580                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2581                                           std::unordered_map<VkShaderModule,
2582                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2583    bool pass = true;
2584    auto module_it = shaderModuleMap.find(pStage->module);
    // Guard against an untracked module handle; dereferencing end() would be undefined
    // behavior (object-tracking validation should normally have flagged this already).
    if (module_it == shaderModuleMap.end()) {
        *out_module = nullptr;
        return pass;
    }
2585    auto module = *out_module = module_it->second.get();
2586    pass &= validate_specialization_offsets(report_data, pStage);
2587
2588    /* find the entrypoint */
2589    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2590    if (entrypoint == module->end()) {
2591        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2592                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2593                    "No entrypoint found named `%s` for stage %s", pStage->pName,
2594                    string_VkShaderStageFlagBits(pStage->stage))) {
2595            pass = false;
2596        }
2597    }
2598
2599    /* validate shader capabilities against enabled device features */
2600    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2601
2602    /* mark accessible ids */
2603    std::unordered_set<uint32_t> accessible_ids;
2604    mark_accessible_ids(module, entrypoint, accessible_ids);
2605
2606    /* validate descriptor set layout against what the entrypoint actually uses */
2607    std::map<descriptor_slot_t, interface_var> descriptor_uses;
2608    collect_interface_by_descriptor_slot(report_data, module, accessible_ids, descriptor_uses);
2609
2610    auto pipelineLayout = pipeline->pipelineLayout;
2611
2612    /* validate push constant usage */
2613    pass &= validate_push_constant_usage(report_data, &pipelineLayout->pushConstantRanges,
2614                                        module, accessible_ids, pStage->stage);
2615
2616    /* validate descriptor use */
2617    for (auto use : descriptor_uses) {
2618        // While validating shaders capture which slots are used by the pipeline
2619        pipeline->active_slots[use.first.first].insert(use.first.second);
2620
2621        /* verify given pipelineLayout has requested setLayout with requested binding */
2622        const auto & binding = get_descriptor_binding(pipelineLayout, use.first);
2623        unsigned required_descriptor_count;
2624
2625        if (!binding) {
2626            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2627                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2628                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2629                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2630                pass = false;
2631            }
2632        } else if (~binding->stageFlags & pStage->stage) {
2633            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2634                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2635                        "Shader uses descriptor slot %u.%u (used "
2636                        "as type `%s`) but descriptor not "
2637                        "accessible from stage %s",
2638                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2639                        string_VkShaderStageFlagBits(pStage->stage))) {
2640                pass = false;
2641            }
2642        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2643                                          /*out*/ required_descriptor_count)) {
2644            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2645                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2646                                                                       "%u.%u: shader uses type `%s` but the "
2647                                                                       "pipeline layout declares descriptor type %s",
2648                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2649                        string_VkDescriptorType(binding->descriptorType))) {
2650                pass = false;
2651            }
2652        } else if (binding->descriptorCount < required_descriptor_count) {
2653            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2654                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2655                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2656                        required_descriptor_count, use.first.first, use.first.second,
2657                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2658                pass = false;
2659            }
2660        }
2661    }
2662
2663    return pass;
2664}
2665
2667// Validate the shaders used by the given pipeline, and store the descriptor slots
2668//  that are actually used by the pipeline into pPipeline->active_slots
2669static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2670                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2671                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2672    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2673    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2674    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2675
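    // One entry per graphics stage (vertex through fragment), indexed by
    // get_shader_stage_id().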
2676    shader_module *shaders[5];
2677    memset(shaders, 0, sizeof(shaders));
2678    spirv_inst_iter entrypoints[5];
2679    memset(entrypoints, 0, sizeof(entrypoints));
2680    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2681    bool pass = true;
2682
2683    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2684        auto pStage = &pCreateInfo->pStages[i];
2685        auto stage_id = get_shader_stage_id(pStage->stage);
2686        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2687                                               &shaders[stage_id], &entrypoints[stage_id],
2688                                               enabledFeatures, shaderModuleMap);
2689    }
2690
2691    vi = pCreateInfo->pVertexInputState;
2692
2693    if (vi) {
2694        pass &= validate_vi_consistency(report_data, vi);
2695    }
2696
2697    if (shaders[vertex_stage]) {
2698        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2699    }
2700
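    /* Walk the pipeline in stage order, matching each present stage's outputs against
     * the inputs of the next present stage; missing intermediate stages are skipped by
     * advancing the consumer index while the producer index lags behind. */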
2701    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2702    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2703
2704    while (!shaders[producer] && producer != fragment_stage) {
2705        producer++;
2706        consumer++;
2707    }
2708
2709    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2710        assert(shaders[producer]);
2711        if (shaders[consumer]) {
2712            pass &= validate_interface_between_stages(report_data,
2713                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2714                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2715
2716            producer = consumer;
2717        }
2718    }
2719
2720    if (shaders[fragment_stage] && pPipeline->renderPass) {
2721        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2722                                                        pPipeline->renderPass, pCreateInfo->subpass);
2723    }
2724
2725    return pass;
2726}
2727
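// A compute pipeline has exactly one shader stage, so shader validation reduces to a
// single validate_pipeline_shader_stage() call on pCreateInfo->stage.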
2728static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2729                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2730    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2731
2732    shader_module *module;
2733    spirv_inst_iter entrypoint;
2734
2735    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2736                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2737}
2738// Return Set node ptr for specified set or else NULL
2739cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2740    auto set_it = my_data->setMap.find(set);
2741    if (set_it == my_data->setMap.end()) {
2742        return NULL;
2743    }
2744    return set_it->second;
2745}
2746// For the given command buffer, verify and update the state for activeSetBindingsPairs
2747//  This includes:
2748//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2749//     To be valid, the dynamic offset combined with the offset and range from its
2750//     descriptor update must not overflow the size of its buffer being updated
2751//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2752//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2753static bool validate_and_update_drawtime_descriptor_state(
2754    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2755    const vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>,
2756                            std::vector<uint32_t> const *>> &activeSetBindingsPairs) {
2757    bool result = false;
2758    for (auto set_bindings_pair : activeSetBindingsPairs) {
2759        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2760        std::string err_str;
2761        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
2762                                         &err_str)) {
2763            // Report error here
2764            auto set = set_node->GetSet();
2765            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2766                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2767                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at draw time: %s",
2768                              reinterpret_cast<const uint64_t &>(set), err_str.c_str());
2769        }
2770        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
2771    }
2772    return result;
2773}
2774
2775// For the given pipeline, return its number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if multisampling is disabled
2776static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
2777    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
2778        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
2779        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
2780    }
2781    return VK_SAMPLE_COUNT_1_BIT;
2782}
2783
2784// Validate draw-time state related to the PSO
2785static bool validatePipelineDrawtimeState(layer_data const *my_data,
2786                                          LAST_BOUND_STATE const &state,
2787                                          const GLOBAL_CB_NODE *pCB,
2788                                          PIPELINE_NODE const *pPipeline) {
2789    bool skip_call = false;
2790
2791    // Verify Vtx binding
2792    if (pPipeline->vertexBindingDescriptions.size() > 0) {
2793        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
2794            if ((pCB->currentDrawData.buffers.size() < (i + 1)) || (pCB->currentDrawData.buffers[i] == VK_NULL_HANDLE)) {
2795                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2796                                  __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2797                                  "The Pipeline State Object (0x%" PRIxLEAST64
2798                                  ") expects this Command Buffer's vertex binding index " PRINTF_SIZE_T_SPECIFIER
2799                                  " to be bound via vkCmdBindVertexBuffers.",
2800                                  (uint64_t)state.pipeline, i);
2801            }
2802        }
2803    } else {
2804        if (!pCB->currentDrawData.buffers.empty()) {
2805            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
2806                              0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2807                              "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
2808                              ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
2809                              (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline);
2810        }
2811    }
2812    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2813    // Skip check if rasterization is disabled or there is no viewport.
2814    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2815         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2816        pPipeline->graphicsPipelineCI.pViewportState) {
2817        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2818        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
2819        if (dynViewport) {
2820            if (pCB->viewports.size() != pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
2821                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2822                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2823                                  "Dynamic viewportCount from vkCmdSetViewport() is " PRINTF_SIZE_T_SPECIFIER
2824                                  ", but PSO viewportCount is %u. These counts must match.",
2825                                  pCB->viewports.size(), pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
2826            }
2827        }
2828        if (dynScissor) {
2829            if (pCB->scissors.size() != pPipeline->graphicsPipelineCI.pViewportState->scissorCount) {
2830                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
2831                                  __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2832                                  "Dynamic scissorCount from vkCmdSetScissor() is " PRINTF_SIZE_T_SPECIFIER
2833                                  ", but PSO scissorCount is %u. These counts must match.",
2834                                  pCB->scissors.size(), pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
2835            }
2836        }
2837    }
2838
2839    // Verify that any MSAA request in PSO matches sample# in bound FB
2840    // Skip the check if rasterization is disabled.
2841    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
2842        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
2843        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
2844        if (pCB->activeRenderPass) {
2845            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
2846            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
2847            uint32_t i;
2848
2849            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
2850            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
2851                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
2852                skip_call |=
2853                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2854                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
2855                                "Render pass subpass %u: blend state attachmentCount %u does not match subpass "
2856                                "colorAttachmentCount %u in Pipeline (0x%" PRIxLEAST64 ")!  These counts "
2857                                "must be the same at draw-time.",
2858                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
2859                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2860            }
2861
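            /* VkSampleCountFlagBits values are single bits, so OR-ing the sample count
             * of every used attachment yields a single bit only when all attachments
             * agree; any mixture (or a mismatch with the PSO) fails the check below. */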
2862            unsigned subpass_num_samples = 0;
2863
2864            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
2865                auto attachment = subpass_desc->pColorAttachments[i].attachment;
2866                if (attachment != VK_ATTACHMENT_UNUSED)
2867                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2868            }
2869
2870            if (subpass_desc->pDepthStencilAttachment &&
2871                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
2872                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
2873                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
2874            }
2875
2876            if (subpass_num_samples && pso_num_samples != subpass_num_samples) {
2877                skip_call |=
2878                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2879                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2880                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
2881                                ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
2882                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
2883                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
2884            }
2885        } else {
2886            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2887                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
2888                                 "No active render pass found at draw-time while Pipeline (0x%" PRIxLEAST64 ") is bound!",
2889                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
2890        }
2891    }
2892    // Verify that PSO creation renderPass is compatible with active renderPass
2893    if (pCB->activeRenderPass) {
2894        std::string err_string;
2895        if (!verify_renderpass_compatibility(my_data, pCB->activeRenderPass->renderPass, pPipeline->graphicsPipelineCI.renderPass,
2896                                             err_string)) {
2897            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
2898            skip_call |=
2899                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
2900                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
2901                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
2902                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
2903                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
2904                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
2905        }
2906    }
2907    // TODO : Add more checks here
2908
2909    return skip_call;
2910}
2911
2912// Validate overall state at the time of a draw call
2913static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *pCB, const bool indexedDraw,
2914                                           const VkPipelineBindPoint bindPoint) {
2915    bool result = false;
2916    auto const &state = pCB->lastBound[bindPoint];
2917    PIPELINE_NODE *pPipe = getPipeline(my_data, state.pipeline);
2918    if (nullptr == pPipe) {
2919        result |= log_msg(
2920            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2921            DRAWSTATE_INVALID_PIPELINE, "DS",
2922            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
2923        // Early return: every check below dereferences the pipeline, so bail out here
2924        // even when the debug callback did not request an abort.
2925        return result;
2926    }
2927    // First check flag states
2928    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2929        result = validate_draw_state_flags(my_data, pCB, pPipe, indexedDraw);
2930
2931    // Now complete other state checks
2932    if (state.pipelineLayout) {
2933        string errorString;
2934        auto pipelineLayout = (bindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) ? pPipe->graphicsPipelineCI.layout : pPipe->computePipelineCI.layout;
2935
2936        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
2937        vector<std::tuple<cvdescriptorset::DescriptorSet *, unordered_set<uint32_t>, std::vector<uint32_t> const *>> activeSetBindingsPairs;
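        // Each tuple holds (bound descriptor set, bindings used by the pipeline,
        // dynamic offsets recorded for that set at bind time); built below and then
        // handed to validate_and_update_drawtime_descriptor_state().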
2938        for (auto & setBindingPair : pPipe->active_slots) {
2939            uint32_t setIndex = setBindingPair.first;
2940            // If valid set is not bound throw an error
2941            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
2942                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2943                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
2944                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
2945                                  setIndex);
2946            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex],
2947                                                        pipelineLayout, setIndex, errorString)) {
2948                // Set is bound but not compatible w/ overlapping pipelineLayout from PSO
2949                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
2950                result |=
2951                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2952                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
2953                            "VkDescriptorSet (0x%" PRIxLEAST64
2954                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
2955                            (uint64_t)setHandle, setIndex, (uint64_t)pipelineLayout, errorString.c_str());
2956            } else { // Valid set is bound and layout compatible, validate that it's updated
2957                // Pull the set node
2958                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
2959                // Save vector of all active sets to verify dynamicOffsets below
2960                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second,
2961                                                                 &state.dynamicOffsets[setIndex]));
2962                // Make sure set has been updated if it has no immutable samplers
2963                //  If it has immutable samplers, we'll flag error later as needed depending on binding
2964                if (!pSet->IsUpdated()) {
2965                    for (auto binding : setBindingPair.second) {
2966                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
2967                            result |= log_msg(
2968                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
2969                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
2970                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
2971                                "this will result in undefined behavior.",
2972                                (uint64_t)pSet->GetSet());
2973                        }
2974                    }
2975                }
2976            }
2977        }
2978        // For given active slots, verify any dynamic descriptors and record updated images & buffers
2979        result |= validate_and_update_drawtime_descriptor_state(my_data, pCB, activeSetBindingsPairs);
2980    }
2981
2982    // Check general pipeline state that needs to be validated at drawtime
2983    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
2984        result |= validatePipelineDrawtimeState(my_data, state, pCB, pPipe);
2985
2986    return result;
2987}
2988
2989// Validate HW line width capabilities prior to setting requested line width.
2990static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
2991    bool skip_call = false;
2992
2993    // First check to see if the physical device supports wide lines.
2994    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
2995        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
2996                             dsError, "DS", "Attempt to set lineWidth to %f but the physical device wideLines feature "
2997                                            "is not enabled, so lineWidth must be 1.0f!",
2998                             lineWidth);
2999    } else {
3000        // Otherwise, make sure the width falls in the valid range.
3001        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3002            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3003            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3004                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but the physical device limits line width "
3005                                                          "to the range [%f, %f]!",
3006                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3007                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3008        }
3009    }
3010
3011    return skip_call;
3012}
3013
3014// Verify that create state for a pipeline is valid
3015static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3016                                      int pipelineIndex) {
3017    bool skipCall = false;
3018
3019    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3020
3021    // If create derivative bit is set, check that we've specified a base
3022    // pipeline correctly, and that the base pipeline was created to allow
3023    // derivatives.
3024    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3025        PIPELINE_NODE *pBasePipeline = nullptr;
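        // Exactly one of basePipelineHandle / basePipelineIndex may be used; the XOR
        // below is false (and flags an error) when both or neither are specified.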
3026        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3027              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3028            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3029                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3030                                "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3031        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3032            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3033                skipCall |=
3034                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3035                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3036                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3037            } else {
3038                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3039            }
3040        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3041            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3042        }
3043
3044        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3045            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3046                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3047                                "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3048        }
3049    }
3050
3051    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3052        if (!my_data->phys_dev_properties.features.independentBlend) {
3053            if (pPipeline->attachments.size() > 1) {
3054                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3055                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3056                    if ((pAttachments[0].blendEnable != pAttachments[i].blendEnable) ||
3057                        (pAttachments[0].srcColorBlendFactor != pAttachments[i].srcColorBlendFactor) ||
3058                        (pAttachments[0].dstColorBlendFactor != pAttachments[i].dstColorBlendFactor) ||
3059                        (pAttachments[0].colorBlendOp != pAttachments[i].colorBlendOp) ||
3060                        (pAttachments[0].srcAlphaBlendFactor != pAttachments[i].srcAlphaBlendFactor) ||
3061                        (pAttachments[0].dstAlphaBlendFactor != pAttachments[i].dstAlphaBlendFactor) ||
3062                        (pAttachments[0].alphaBlendOp != pAttachments[i].alphaBlendOp) ||
3063                        (pAttachments[0].colorWriteMask != pAttachments[i].colorWriteMask)) {
3064                        skipCall |=
3065                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3066                            DRAWSTATE_INDEPENDENT_BLEND, "DS", "Invalid Pipeline CreateInfo: If the independentBlend feature is not "
3067                            "enabled, all elements of pAttachments must be identical");
3068                    }
3069                }
3070            }
3071        }
3072        if (!my_data->phys_dev_properties.features.logicOp &&
3073            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3074            skipCall |=
3075                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3076                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3077                        "Invalid Pipeline CreateInfo: If the logicOp feature is not enabled, logicOpEnable must be VK_FALSE");
3078        }
3079        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3080            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3081             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3082            skipCall |=
3083                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3084                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3085                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3086        }
3087    }
3088
3089    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3090    // produces nonsense errors that confuse users. Other layers should already
3091    // emit errors for an invalid renderpass.
3092    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3093    if (renderPass &&
3094        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
3095        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3096                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3097                                                                           "is out of range for this renderpass (0..%u)",
3098                            pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
3099    }
3100
3101    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
3102                                                    my_data->shaderModuleMap)) {
3103        skipCall = true;
3104    }
3105    // Each shader's stage must be unique
3106    if (pPipeline->duplicate_shaders) {
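        // Walk each graphics stage bit from vertex through fragment, reporting every
        // stage that appeared more than once in pStages.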
3107        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3108            if (pPipeline->duplicate_shaders & stage) {
3109                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3110                                    __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3111                                    "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3112                                    string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3113            }
3114        }
3115    }
3116    // VS is required
3117    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3118        skipCall |=
3119            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3120                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3121    }
3122    // Either both or neither TC/TE shaders should be defined
3123    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3124        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3125        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3126                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3127                            "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3128    }
3129    // Compute shaders should be specified independent of Gfx shaders
3130    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3131        (pPipeline->active_shaders &
3132         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3133          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3134        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3135                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3136                            "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3137    }
3138    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3139    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3140    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3141        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3142         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3143        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3144                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3145                                                                           "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3146                                                                           "topology for tessellation pipelines");
3147    }
3148    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3149        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3150        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3151            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3152                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3153                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3154                                                                               "topology is only valid for tessellation pipelines");
3155        }
3156        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3157            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3158                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3159                                "Invalid Pipeline CreateInfo State: "
3160                                "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3161                                "topology used. pTessellationState must not be NULL in this case.");
3162        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3163                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3164            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3165                                DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3166                                                                               "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3167                                                                               "topology used with patchControlPoints value %u."
3168                                                                               " patchControlPoints should be >0 and <=32.",
3169                                pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3170        }
3171    }
3172    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3173    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3174        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3175            skipCall |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3176                                        pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3177        }
3178    }
3179    // Viewport state must be included if rasterization is enabled.
3180    // If the viewport state is included, the viewport and scissor counts should always match.
3181    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3182    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3183        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3184        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3185            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3186                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3187                                                                           "and scissors are dynamic PSO must include "
3188                                                                           "viewportCount and scissorCount in pViewportState.");
3189        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3190                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3191            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3192                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3193                                "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3194                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3195                                pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3196        } else {
3197            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3198            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3199            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3200            if (!dynViewport) {
3201                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3202                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3203                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3204                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3205                                        "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3206                                        "must either include pViewports data, or include viewport in pDynamicState and set it with "
3207                                        "vkCmdSetViewport().",
3208                                        pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3209                }
3210            }
3211            if (!dynScissor) {
3212                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3213                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3214                    skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3215                                        __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3216                                        "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3217                                        "must either include pScissors data, or include scissor in pDynamicState and set it with "
3218                                        "vkCmdSetScissor().",
3219                                        pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3220                }
3221            }
3222        }
3223    }
3224    return skipCall;
3225}
3226
3227// Free the Pipeline nodes
3228static void deletePipelines(layer_data *my_data) {
3229    if (my_data->pipelineMap.empty())
3230        return;
3231    for (auto &pipe_map_pair : my_data->pipelineMap) {
3232        delete pipe_map_pair.second;
3233    }
3234    my_data->pipelineMap.clear();
3235}
3236
3237// Block of code at start here specifically for managing/tracking DSs
3238
3239// Return Pool node ptr for specified pool or else NULL
3240DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3241    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3242    if (pool_it == dev_data->descriptorPoolMap.end()) {
3243        return NULL;
3244    }
3245    return pool_it->second;
3246}
3247
3248// Return false if update struct is of valid type, otherwise flag error and return code from callback
3249static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3250    switch (pUpdateStruct->sType) {
3251    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3252    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3253        return false;
3254    default:
3255        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3256                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3257                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3258                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3259    }
3260}
3261
3262// Return the descriptor count for the given update struct
3263static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3264    switch (pUpdateStruct->sType) {
3265    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3266        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3267    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3268        // TODO : Need to understand this case better and make sure code is correct
3269        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3270    default:
3271        return 0;
3272    }
3273}
3274
3275// For given layout and update, return the first overall index of the layout that is updated
3276static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3277                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3278    return binding_start_index + arrayIndex;
3279}
3280// For given layout and update, return the last overall index of the layout that is updated
3281static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3282                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3283    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3284    return binding_start_index + arrayIndex + count - 1;
3285}
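// Worked example of the two index helpers above (illustrative numbers, not
// taken from the layer): for a binding whose descriptors begin at overall
// layout index 8, an update with arrayIndex 2 and descriptorCount 3 touches:
//
//   start = binding_start_index + arrayIndex              = 8 + 2         = 10
//   end   = binding_start_index + arrayIndex + count - 1  = 8 + 2 + 3 - 1 = 12
//
// i.e. overall indices 10..12 inclusive are updated.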
3286// Verify that the descriptor type in the update struct matches what's expected by the layout
3287static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3288                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3289    // First get actual type of update
3290    bool skipCall = false;
3291    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3292    switch (pUpdateStruct->sType) {
3293    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3294        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3295        break;
3296    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3297        /* copy updates carry no descriptor type of their own; nothing to validate here */
3298        return false;
3300    default:
3301        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3302                            DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3303                            "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3304                            string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3305    }
3306    if (!skipCall) {
3307        if (layout_type != actualType) {
3308            skipCall |= log_msg(
3309                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3310                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3311                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3312                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3313        }
3314    }
3315    return skipCall;
3316}
3317// TODO: Consolidate the overloaded FindLayout/SetLayout helper functions
3318bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3319    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3320    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3321        return false;
3322    }
3323    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3324    imgpair.subresource.aspectMask = aspectMask;
3325    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3326    if (imgsubIt == pCB->imageLayoutMap.end()) {
3327        return false;
3328    }
3329    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3330        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3331                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3332                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3333                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3334    }
3335    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3336        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3337                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3338                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3339                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3340    }
3341    node = imgsubIt->second;
3342    return true;
3343}
3344
3345bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3346    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3347        return false;
3348    }
3349    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3350    imgpair.subresource.aspectMask = aspectMask;
3351    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3352    if (imgsubIt == my_data->imageLayoutMap.end()) {
3353        return false;
3354    }
3355    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3356        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3357                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3358                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3359                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3360    }
3361    layout = imgsubIt->second.layout;
3362    return true;
3363}
3364
3365// find layout(s) on the cmd buf level
3366bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3367    ImageSubresourcePair imgpair = {image, true, range};
3368    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3369    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3370    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3371    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3372    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3373    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3374        imgpair = {image, false, VkImageSubresource()};
3375        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3376        if (imgsubIt == pCB->imageLayoutMap.end())
3377            return false;
3378        node = imgsubIt->second;
3379    }
3380    return true;
3381}
3382
3383// find layout(s) on the global level
3384bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3385    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3386    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3387    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3388    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3389    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3390    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3391        imgpair = {imgpair.image, false, VkImageSubresource()};
3392        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3393        if (imgsubIt == my_data->imageLayoutMap.end())
3394            return false;
3395        layout = imgsubIt->second.layout;
3396    }
3397    return true;
3398}
3399
3400bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3401    ImageSubresourcePair imgpair = {image, true, range};
3402    return FindLayout(my_data, imgpair, layout);
3403}
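// A minimal usage sketch for the global-level queries above (the image and
// subresource values are hypothetical):
//
//   VkImageSubresource sub = {VK_IMAGE_ASPECT_COLOR_BIT, 0 /*mipLevel*/, 0 /*arrayLayer*/};
//   VkImageLayout layout;
//   if (FindLayout(my_data, image, sub, layout)) {
//       // layout now holds the tracked layout for this subresource, or the
//       // whole-image layout if no per-subresource entry exists
//   }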
3404
3405bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3406    auto sub_data = my_data->imageSubresourceMap.find(image);
3407    if (sub_data == my_data->imageSubresourceMap.end())
3408        return false;
3409    auto img_node = getImageNode(my_data, image);
3410    if (!img_node)
3411        return false;
3412    bool ignoreGlobal = false;
3413    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3414    // potential errors in this case.
3415    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3416        ignoreGlobal = true;
3417    }
3418    for (auto imgsubpair : sub_data->second) {
3419        if (ignoreGlobal && !imgsubpair.hasSubresource)
3420            continue;
3421        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3422        if (img_data != my_data->imageLayoutMap.end()) {
3423            layouts.push_back(img_data->second.layout);
3424        }
3425    }
3426    return true;
3427}
3428
3429// Set the layout on the global level
3430void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3431    VkImage &image = imgpair.image;
3432    // TODO (mlentine): Maybe set format if new? Not used atm.
3433    my_data->imageLayoutMap[imgpair].layout = layout;
3434    // TODO (mlentine): Maybe make vector a set?
3435    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3436    if (subresource == my_data->imageSubresourceMap[image].end()) {
3437        my_data->imageSubresourceMap[image].push_back(imgpair);
3438    }
3439}
3440
3441// Set the layout on the cmdbuf level
3442void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3443    pCB->imageLayoutMap[imgpair] = node;
3444    // TODO (mlentine): Maybe make vector a set?
3445    auto subresource =
3446        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3447    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3448        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3449    }
3450}
3451
3452void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3453    // TODO (mlentine): Maybe make vector a set?
3454    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3455        pCB->imageSubresourceMap[imgpair.image].end()) {
3456        pCB->imageLayoutMap[imgpair].layout = layout;
3457    } else {
3458        // TODO (mlentine): Could be expensive and might need to be removed.
3459        assert(imgpair.hasSubresource);
3460        IMAGE_CMD_BUF_LAYOUT_NODE node;
3461        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3462            node.initialLayout = layout;
3463        }
3464        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3465    }
3466}
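// A short trace of the function above (hypothetical subresource pair,
// abbreviated VK_IMAGE_LAYOUT_* names) showing how the first layout seen
// becomes the CB's expected initial layout, while later calls only advance the
// current layout:
//
//   SetLayout(pCB, pair, UNDEFINED);     // first touch: node = {UNDEFINED, UNDEFINED}
//   SetLayout(pCB, pair, TRANSFER_DST);  // node = {UNDEFINED, TRANSFER_DST}
//   SetLayout(pCB, pair, SHADER_READ);   // node = {UNDEFINED, SHADER_READ}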
3467
3468template <class OBJECT, class LAYOUT>
3469void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3470    if (imgpair.subresource.aspectMask & aspectMask) {
3471        imgpair.subresource.aspectMask = aspectMask;
3472        SetLayout(pObject, imgpair, layout);
3473    }
3474}
3475
3476template <class OBJECT, class LAYOUT>
3477void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3478    ImageSubresourcePair imgpair = {image, true, range};
3479    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3480    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3481    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3482    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3483}
3484
3485template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3486    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3487    SetLayout(pObject, imgpair, layout); // imgpair covers the whole image (hasSubresource = false)
3488}
3489
3490void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3491    auto iv_data = getImageViewData(dev_data, imageView);
3492    assert(iv_data);
3493    const VkImage &image = iv_data->image;
3494    const VkImageSubresourceRange &subRange = iv_data->subresourceRange;
3495    // TODO: Do not iterate over every possibility - consolidate where possible
3496    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3497        uint32_t level = subRange.baseMipLevel + j;
3498        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3499            uint32_t layer = subRange.baseArrayLayer + k;
3500            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3501            SetLayout(pCB, image, sub, layout);
3502        }
3503    }
3504}
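// For example (hypothetical view): a view with baseMipLevel = 1, levelCount = 2,
// baseArrayLayer = 0, layerCount = 2 expands into four SetLayout() calls, one
// per (mip, layer) subresource:
//
//   {aspectMask, 1, 0}, {aspectMask, 1, 1}, {aspectMask, 2, 0}, {aspectMask, 2, 1}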
3505
3506// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3507// func_str is the name of the calling function
3508// Return false if no errors occur
3509// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3510static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3511    bool skip_call = false;
3512    auto set_node = my_data->setMap.find(set);
3513    if (set_node == my_data->setMap.end()) {
3514        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3515                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3516                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3517                             (uint64_t)(set));
3518    } else {
3519        if (set_node->second->in_use.load()) {
3520            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3521                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3522                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
3523                                 func_str.c_str(), (uint64_t)(set));
3524        }
3525    }
3526    return skip_call;
3527}
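// A minimal sketch of how a destroy/free path is expected to use this check
// (hypothetical caller; the real entry points appear later in this file):
//
//   bool skip = validateIdleDescriptorSet(dev_data, set, "vkFreeDescriptorSets");
//   if (!skip) {
//       // safe to tear the set down: it exists and no in-flight CB uses it
//   }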
3528
3529// Remove set from setMap and delete the set
3530static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3531    dev_data->setMap.erase(descriptor_set->GetSet());
3532    delete descriptor_set;
3533}
3534// Free all DS Pools including their Sets & related sub-structs
3535// NOTE : Calls to this function must be made while holding the global lock
3536static void deletePools(layer_data *my_data) {
3537    if (my_data->descriptorPoolMap.empty())
3538        return;
3539    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3540        // Remove this pool's sets from setMap and delete them
3541        for (auto ds : (*ii).second->sets) {
3542            freeDescriptorSet(my_data, ds);
3543        }
3544        (*ii).second->sets.clear();
3545    }
3546    my_data->descriptorPoolMap.clear();
3547}
3548
3549static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3550                                VkDescriptorPoolResetFlags flags) {
3551    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3552    if (!pPool) {
3553        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
3554                (uint64_t)pool, __LINE__, DRAWSTATE_INVALID_POOL, "DS",
3555                "Unable to find pool node for pool 0x%" PRIxLEAST64 " specified in vkResetDescriptorPool() call", (uint64_t)pool);
3556    } else {
3557        // TODO: validate flags
3558        // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3559        for (auto ds : pPool->sets) {
3560            freeDescriptorSet(my_data, ds);
3561        }
3562        pPool->sets.clear();
3563        // Reset available count for each type and available sets for this pool
3564        for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3565            pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3566        }
3567        pPool->availableSets = pPool->maxSets;
3568    }
3569}
3570
3571// For given CB object, fetch associated CB Node from map
3572static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3573    auto it = my_data->commandBufferMap.find(cb);
3574    if (it == my_data->commandBufferMap.end()) {
3575        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3576                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3577                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3578        return NULL;
3579    }
3580    return it->second;
3581}
3582// Free all CB Nodes
3583// NOTE : Calls to this function must be made while holding the global lock
3584static void deleteCommandBuffers(layer_data *my_data) {
3585    if (my_data->commandBufferMap.empty()) {
3586        return;
3587    }
3588    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3589        delete (*ii).second;
3590    }
3591    my_data->commandBufferMap.clear();
3592}
3593
3594static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3595    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3596                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3597                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3598}
3599
3600bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3601    if (!pCB->activeRenderPass)
3602        return false;
3603    bool skip_call = false;
3604    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3605        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3606        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3607                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3608                             "Commands cannot be called in a subpass using secondary command buffers.");
3609    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3610        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3611                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3612                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3613    }
3614    return skip_call;
3615}
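// The rule enforced above comes from the render pass API: the contents value
// chosen at vkCmdBeginRenderPass()/vkCmdNextSubpass() time dictates what may be
// recorded in that subpass. A sketch of the secondary-CB form (hypothetical
// handles):
//
//   vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//   vkCmdExecuteCommands(cb, 1, &secondary_cb); // only execute/next-subpass/end are legal here
//   vkCmdEndRenderPass(cb);
//
// With VK_SUBPASS_CONTENTS_INLINE the situation inverts: draws are recorded
// directly and vkCmdExecuteCommands() becomes invalid.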
3616
3617static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3618    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3619        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3620                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3621                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3622    return false;
3623}
3624
3625static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3626    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3627        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3628                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3629                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3630    return false;
3631}
3632
3633static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3634    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3635        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3636                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3637                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3638    return false;
3639}
3640
3641// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3642//  in the recording state or if there's an issue with the Cmd ordering
3643static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3644    bool skipCall = false;
3645    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3646    if (pPool) {
3647        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3648        switch (cmd) {
3649        case CMD_BINDPIPELINE:
3650        case CMD_BINDPIPELINEDELTA:
3651        case CMD_BINDDESCRIPTORSETS:
3652        case CMD_FILLBUFFER:
3653        case CMD_CLEARCOLORIMAGE:
3654        case CMD_SETEVENT:
3655        case CMD_RESETEVENT:
3656        case CMD_WAITEVENTS:
3657        case CMD_BEGINQUERY:
3658        case CMD_ENDQUERY:
3659        case CMD_RESETQUERYPOOL:
3660        case CMD_COPYQUERYPOOLRESULTS:
3661        case CMD_WRITETIMESTAMP:
3662            skipCall |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3663            break;
3664        case CMD_SETVIEWPORTSTATE:
3665        case CMD_SETSCISSORSTATE:
3666        case CMD_SETLINEWIDTHSTATE:
3667        case CMD_SETDEPTHBIASSTATE:
3668        case CMD_SETBLENDSTATE:
3669        case CMD_SETDEPTHBOUNDSSTATE:
3670        case CMD_SETSTENCILREADMASKSTATE:
3671        case CMD_SETSTENCILWRITEMASKSTATE:
3672        case CMD_SETSTENCILREFERENCESTATE:
3673        case CMD_BINDINDEXBUFFER:
3674        case CMD_BINDVERTEXBUFFER:
3675        case CMD_DRAW:
3676        case CMD_DRAWINDEXED:
3677        case CMD_DRAWINDIRECT:
3678        case CMD_DRAWINDEXEDINDIRECT:
3679        case CMD_BLITIMAGE:
3680        case CMD_CLEARATTACHMENTS:
3681        case CMD_CLEARDEPTHSTENCILIMAGE:
3682        case CMD_RESOLVEIMAGE:
3683        case CMD_BEGINRENDERPASS:
3684        case CMD_NEXTSUBPASS:
3685        case CMD_ENDRENDERPASS:
3686            skipCall |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3687            break;
3688        case CMD_DISPATCH:
3689        case CMD_DISPATCHINDIRECT:
3690            skipCall |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3691            break;
3692        case CMD_COPYBUFFER:
3693        case CMD_COPYIMAGE:
3694        case CMD_COPYBUFFERTOIMAGE:
3695        case CMD_COPYIMAGETOBUFFER:
3696        case CMD_CLONEIMAGEDATA:
3697        case CMD_UPDATEBUFFER:
3698        case CMD_PIPELINEBARRIER:
3699        case CMD_EXECUTECOMMANDS:
3700        case CMD_END:
3701            break;
3702        default:
3703            break;
3704        }
3705    }
3706    if (pCB->state != CB_RECORDING) {
3707        skipCall |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3708    } else {
3709        skipCall |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3710        CMD_NODE cmdNode = {};
3711        // Initialize the cmd node and append it to the end of the CB's command vector
3712        cmdNode.cmdNumber = ++pCB->numCmds;
3713        cmdNode.type = cmd;
3714        pCB->cmds.push_back(cmdNode);
3715    }
3716    return skipCall;
3717}
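// Example of the queue-capability gating above (hypothetical pool): a command
// buffer allocated from a pool whose queue family advertises only
// VK_QUEUE_TRANSFER_BIT will trip checkGraphicsBit() for CMD_DRAW, since
// (flags & VK_QUEUE_GRAPHICS_BIT) == 0 for that family.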
3718// Reset the command buffer state
3719//  Maintain the createInfo and set state to CB_NEW, but clear all other state
3720static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
3721    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
3722    if (pCB) {
3723        pCB->in_use.store(0);
3724        pCB->cmds.clear();
3725        // Reset CB state (note that createInfo is not cleared)
3726        pCB->commandBuffer = cb;
3727        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
3728        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
3729        pCB->numCmds = 0;
3730        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
3731        pCB->state = CB_NEW;
3732        pCB->submitCount = 0;
3733        pCB->status = 0;
3734        pCB->viewports.clear();
3735        pCB->scissors.clear();
3736
3737        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
3738            // Before clearing lastBoundState, remove any CB bindings from all uniqueBoundSets
3739            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
3740                set->RemoveBoundCommandBuffer(pCB);
3741            }
3742            pCB->lastBound[i].reset();
3743        }
3744
3745        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
3746        pCB->activeRenderPass = nullptr;
3747        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
3748        pCB->activeSubpass = 0;
3749        pCB->broken_bindings.clear();
3750        pCB->waitedEvents.clear();
3751        pCB->events.clear();
3752        pCB->writeEventsBeforeWait.clear();
3753        pCB->waitedEventsBeforeQueryReset.clear();
3754        pCB->queryToStateMap.clear();
3755        pCB->activeQueries.clear();
3756        pCB->startedQueries.clear();
3757        pCB->imageSubresourceMap.clear();
3758        pCB->imageLayoutMap.clear();
3759        pCB->eventToStageMap.clear();
3760        pCB->drawData.clear();
3761        pCB->currentDrawData.buffers.clear();
3762        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
3763        // Make sure any secondaryCommandBuffers are removed from globalInFlight
3764        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
3765            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
3766        }
3767        pCB->secondaryCommandBuffers.clear();
3768        pCB->updateImages.clear();
3769        pCB->updateBuffers.clear();
3770        clear_cmd_buf_and_mem_references(dev_data, pCB);
3771        pCB->eventUpdates.clear();
3772        pCB->queryUpdates.clear();
3773
3774        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
3775        for (auto framebuffer : pCB->framebuffers) {
3776            auto fb_node = getFramebuffer(dev_data, framebuffer);
3777            if (fb_node)
3778                fb_node->cb_bindings.erase(pCB);
3779        }
3780        pCB->framebuffers.clear();
3781        pCB->activeFramebuffer = VK_NULL_HANDLE;
3782    }
3783}
3784
3785// Set PSO-related status bits for CB, including dynamic state set via PSO
3786static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
3787    // Account for any dynamic state not set via this PSO
3788    if (!pPipe->graphicsPipelineCI.pDynamicState ||
3789        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
3790        pCB->status = CBSTATUS_ALL;
3791    } else {
3792        // Start with all state flags set,
3793        // then clear any state the PSO declares as dynamic,
3794        // and finally OR the result into the CB status mask
3795        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
3796        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
3797            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
3798            case VK_DYNAMIC_STATE_VIEWPORT:
3799                psoDynStateMask &= ~CBSTATUS_VIEWPORT_SET;
3800                break;
3801            case VK_DYNAMIC_STATE_SCISSOR:
3802                psoDynStateMask &= ~CBSTATUS_SCISSOR_SET;
3803                break;
3804            case VK_DYNAMIC_STATE_LINE_WIDTH:
3805                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
3806                break;
3807            case VK_DYNAMIC_STATE_DEPTH_BIAS:
3808                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
3809                break;
3810            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
3811                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
3812                break;
3813            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
3814                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
3815                break;
3816            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
3817                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
3818                break;
3819            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
3820                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
3821                break;
3822            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
3823                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
3824                break;
3825            default:
3826                // TODO : Flag error here
3827                break;
3828            }
3829        }
3830        pCB->status |= psoDynStateMask;
3831    }
3832}
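// Consequence of the masking above (illustrative): a pipeline that lists
// VK_DYNAMIC_STATE_VIEWPORT as dynamic leaves CBSTATUS_VIEWPORT_SET clear at
// bind time, so the draw-time checks expect the app to have recorded e.g.:
//
//   vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
//   vkCmdSetViewport(cb, 0, 1, &viewport); // flips CBSTATUS_VIEWPORT_SET elsewhere in the layer
//   vkCmdDraw(cb, 3, 1, 0, 0);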
3833
3834// Print the last bound Gfx Pipeline
3835static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
3836    bool skipCall = false;
3837    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3838    if (pCB) {
3839        PIPELINE_NODE *pPipeTrav = getPipeline(my_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
3840        if (pPipeTrav) {
3843            skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3844                                __LINE__, DRAWSTATE_NONE, "DS", "%s",
3845                                vk_print_vkgraphicspipelinecreateinfo(
3846                                    reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
3847                                    .c_str());
3848        }
3849    }
3850    return skipCall;
3851}
3852
3853static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
3854    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
3855    if (pCB && !pCB->cmds.empty()) {
3856        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3857                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
3858        vector<CMD_NODE> cmds = pCB->cmds;
3859        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
3860            // TODO : Need to pass cb as srcObj here
3861            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
3862                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
3863        }
3866    }
3867}
3868
3869static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
3870    bool skipCall = false;
3871    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
3872        return skipCall;
3873    }
3874    skipCall |= printPipeline(my_data, cb);
3875    return skipCall;
3876}
3877
3878// Flags validation error if the associated call is made inside a render pass. The apiName
3879// routine should ONLY be called outside a render pass.
3880static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3881    bool inside = false;
3882    if (pCB->activeRenderPass) {
3883        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3884                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
3885                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
3886                         (uint64_t)pCB->activeRenderPass->renderPass);
3887    }
3888    return inside;
3889}
3890
3891// Flags validation error if the associated call is made outside a render pass. The apiName
3892// routine should ONLY be called inside a render pass.
3893static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
3894    bool outside = false;
3895    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
3896        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
3897         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
3898        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3899                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
3900                          "%s: This call must be issued inside an active render pass.", apiName);
3901    }
3902    return outside;
3903}
3904
3905static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
3906
3907    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
3908
3909}
3910
3911VKAPI_ATTR VkResult VKAPI_CALL
3912CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
3913    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
3914
3915    assert(chain_info->u.pLayerInfo);
3916    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
3917    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
3918    if (fpCreateInstance == NULL)
3919        return VK_ERROR_INITIALIZATION_FAILED;
3920
3921    // Advance the link info for the next element on the chain
3922    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
3923
3924    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
3925    if (result != VK_SUCCESS)
3926        return result;
3927
3928    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
3929    instance_data->instance = *pInstance;
3930    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
3931    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
3932
3933    instance_data->report_data =
3934        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
3935                                     pCreateInfo->ppEnabledExtensionNames);
3936    init_core_validation(instance_data, pAllocator);
3937
3938    instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
3939    ValidateLayerOrdering(*pCreateInfo);
3940
3941    return result;
3942}
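// For reference, an application reaches the entry point above by enabling this
// layer at instance creation; a minimal sketch (layer name per this project's
// convention; error handling omitted):
//
//   const char *layers[] = {"VK_LAYER_LUNARG_core_validation"};
//   VkInstanceCreateInfo ici = {VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
//   ici.enabledLayerCount = 1;
//   ici.ppEnabledLayerNames = layers;
//   VkInstance instance;
//   vkCreateInstance(&ici, nullptr, &instance); // enters CreateInstance() above via the loader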
3943
3944/* hook DestroyInstance to remove tableInstanceMap entry */
3945VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
3946    // TODOSC : Shouldn't need any customization here
3947    dispatch_key key = get_dispatch_key(instance);
3948    // TBD: Need any locking this early, in case this function is called at the
3949    // same time by more than one thread?
3950    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
3951    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
3952    pTable->DestroyInstance(instance, pAllocator);
3953
3954    std::lock_guard<std::mutex> lock(global_lock);
3955    // Clean up logging callback, if any
3956    while (my_data->logging_callback.size() > 0) {
3957        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
3958        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
3959        my_data->logging_callback.pop_back();
3960    }
3961
3962    layer_debug_report_destroy_instance(my_data->report_data);
3963    delete my_data->instance_dispatch_table;
3964    layer_data_map.erase(key);
3965}
3966
3967static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
3968    uint32_t i;
3969    // TBD: Need any locking, in case this function is called at the same time
3970    // by more than one thread?
3971    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
3972    dev_data->device_extensions.wsi_enabled = false;
3973
3974    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
3975    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
3976    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
3977    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
3978    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
3979    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
3980    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
3981
3982    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
3983        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
3984            dev_data->device_extensions.wsi_enabled = true;
3985    }
3986}
3987
3988// Verify that queue family has been properly requested
3989bool ValidateRequestedQueueFamilyProperties(layer_data *dev_data, const VkDeviceCreateInfo *create_info) {
3990    bool skip_call = false;
3991    // First check whether the app has actually queried queueFamilyProperties
3992    if (!dev_data->physical_device_state) {
3993        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
3994                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
3995                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
3996    } else if (QUERY_DETAILS != dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
3997        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
3998        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
3999                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4000                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4001    } else {
4002        // Check that the requested queue properties are valid
4003        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4004            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4005            if (dev_data->queue_family_properties.size() <=
4006                requestedIndex) { // requested index is out of bounds for this physical device
4007                skip_call |= log_msg(
4008                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4009                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4010                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4011            } else if (create_info->pQueueCreateInfos[i].queueCount >
4012                       dev_data->queue_family_properties[requestedIndex]->queueCount) {
4013                skip_call |=
4014                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4015                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4016                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4017                            "requested queueCount is %u.",
4018                            requestedIndex, dev_data->queue_family_properties[requestedIndex]->queueCount,
4019                            create_info->pQueueCreateInfos[i].queueCount);
4020            }
4021        }
4022    }
4023    return skip_call;
4024}
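// The application-side call sequence this check expects (a minimal sketch;
// physical device enumeration and error handling omitted):
//
//   uint32_t count = 0;
//   vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
//   std::vector<VkQueueFamilyProperties> props(count);
//   vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());
//   // ...pick a queueFamilyIndex and a queueCount <= props[i].queueCount,
//   // then build VkDeviceQueueCreateInfo for vkCreateDevice()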
4025
4026// Verify that features have been queried and that they are available
4027static bool ValidateRequestedFeatures(layer_data *dev_data, const VkPhysicalDeviceFeatures *requested_features) {
4028    bool skip_call = false;
4029
4030    VkBool32 *actual = reinterpret_cast<VkBool32 *>(&(dev_data->physical_device_features));
4031    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
4032    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
4033    //  Need to provide the struct member name with the issue. To do that seems like we'll
4034    //  have to loop through each struct member, which should be done with codegen to keep it in sync.
4035    uint32_t errors = 0;
4036    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
4037    for (uint32_t i = 0; i < total_bools; i++) {
4038        if (requested[i] > actual[i]) {
4039            // TODO: Add index to struct member name helper to be able to include a feature name
4040            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4041                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4042                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
4043                "which is not available on this device.",
4044                i);
4045            errors++;
4046        }
4047    }
4048    if (errors && (UNCALLED == dev_data->physical_device_state->vkGetPhysicalDeviceFeaturesState)) {
4049        // If user didn't request features, notify them that they should
4050        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
4051        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4052                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4053                             "DL", "You requested features that are unavailable on this device. You should first query feature "
4054                                   "availability by calling vkGetPhysicalDeviceFeatures().");
4055    }
4056    return skip_call;
4057}
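// The loop above treats VkPhysicalDeviceFeatures as a flat array of VkBool32,
// which holds because the struct is nothing but VkBool32 members. Worked
// example (hypothetical device): if the app requests geometryShader = VK_TRUE
// while the device reports VK_FALSE, then at that member's index
// requested[i] (1) > actual[i] (0), so the request is flagged and errors is
// incremented.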
4058
4059VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4060                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4061    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
4062    bool skip_call = false;
4063
4064    // Check that any requested features are available
4065    if (pCreateInfo->pEnabledFeatures) {
4066        skip_call |= ValidateRequestedFeatures(my_instance_data, pCreateInfo->pEnabledFeatures);
4067    }
4068    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, pCreateInfo);
4069
4070    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4071
4072    assert(chain_info->u.pLayerInfo);
4073    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4074    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4075    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4076    if (fpCreateDevice == NULL) {
4077        return VK_ERROR_INITIALIZATION_FAILED;
4078    }
4079
4080    // Advance the link info for the next element on the chain
4081    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4082
4083    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4084    if (result != VK_SUCCESS) {
4085        return result;
4086    }
4087
4088    std::unique_lock<std::mutex> lock(global_lock);
4089    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4090
4091    // Setup device dispatch table
4092    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
4093    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
4094    my_device_data->device = *pDevice;
4095
4096    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4097    createDeviceRegisterExtensions(pCreateInfo, *pDevice);
4098    // Get physical device limits for this device
4099    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4100    uint32_t count;
4101    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4102    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4103    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
4104        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4105    // TODO: device limits should make sure these are compatible
4106    if (pCreateInfo->pEnabledFeatures) {
4107        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
4108    } else {
4109        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
4110    }
4111    // Store physical device mem limits into device layer_data struct
4112    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4113    lock.unlock();
4114
4115    ValidateLayerOrdering(*pCreateInfo);
4116
4117    return result;
4118}
4119
4120// prototype
4121static void deleteRenderPasses(layer_data *);
4122VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4123    // TODOSC : Shouldn't need any customization here
4124    dispatch_key key = get_dispatch_key(device);
4125    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4126    // Free all the memory
4127    std::unique_lock<std::mutex> lock(global_lock);
4128    deletePipelines(dev_data);
4129    deleteRenderPasses(dev_data);
4130    deleteCommandBuffers(dev_data);
4131    // This will also delete all sets in the pool & remove them from setMap
4132    deletePools(dev_data);
4133    // All sets should be removed
4134    assert(dev_data->setMap.empty());
4135    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4136        delete del_layout.second;
4137    }
4138    dev_data->descriptorSetLayoutMap.clear();
4139    dev_data->imageViewMap.clear();
4140    dev_data->imageMap.clear();
4141    dev_data->imageSubresourceMap.clear();
4142    dev_data->imageLayoutMap.clear();
4143    dev_data->bufferViewMap.clear();
4144    dev_data->bufferMap.clear();
4145    // Queues persist until device is destroyed
4146    dev_data->queueMap.clear();
4147    lock.unlock();
4148#if MTMERGESOURCE
4149    bool skipCall = false;
4150    lock.lock();
4151    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4152            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4153    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4154            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4155    print_mem_list(dev_data);
4156    printCBList(dev_data);
4157    // Report any memory leaks
4158    DEVICE_MEM_INFO *pInfo = NULL;
4159    if (!dev_data->memObjMap.empty()) {
4160        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4161            pInfo = (*ii).second.get();
4162            if (pInfo->allocInfo.allocationSize != 0) {
4163                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4164                skipCall |=
4165                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4166                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK,
4167                            "MEM", "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4168                                   "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4169                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4170            }
4171        }
4172    }
4173    layer_debug_report_destroy_device(device);
4174    lock.unlock();
4175
4176#if DISPATCH_MAP_DEBUG
4177    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
4178#endif
4179    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
4180    if (!skipCall) {
4181        pDisp->DestroyDevice(device, pAllocator);
4182    }
4183#else
4184    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
4185#endif
4186    delete dev_data->device_dispatch_table;
4187    layer_data_map.erase(key);
4188}
4189
4190static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4191
4192// This validates that the initial layout specified in the command buffer for
4193// the IMAGE is the same as the current global IMAGE layout
4195static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4196    bool skip_call = false;
4197    for (auto cb_image_data : pCB->imageLayoutMap) {
4198        VkImageLayout imageLayout;
4199        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4200            skip_call |=
4201                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4202                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4203                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4204        } else {
4205            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4206                // TODO: Set memory invalid which is in mem_tracker currently
4207            } else if (imageLayout != cb_image_data.second.initialLayout) {
4208                if (cb_image_data.first.hasSubresource) {
4209                    skip_call |= log_msg(
4210                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4211                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4212                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4213                        "with layout %s when first use is %s.",
4214                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4215                                cb_image_data.first.subresource.arrayLayer,
4216                                cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4217                        string_VkImageLayout(cb_image_data.second.initialLayout));
4218                } else {
4219                    skip_call |= log_msg(
4220                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4221                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4222                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4223                        "first use is %s.",
4224                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4225                        string_VkImageLayout(cb_image_data.second.initialLayout));
4226                }
4227            }
4228            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4229        }
4230    }
4231    return skip_call;
4232}
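// A typical mismatch this catches (hypothetical app code): a CB recorded
// assuming an image starts in TRANSFER_DST_OPTIMAL is submitted while the
// global tracker still has it in UNDEFINED, usually because a transition such
// as the following was never recorded:
//
//   VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//   barrier.srcQueueFamilyIndex = barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//   barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//   barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
//   barrier.image = image;
//   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   vkCmdPipelineBarrier(setup_cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
//                        VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);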
4233
4234// Track which resources are in-flight by atomically incrementing their "in_use" count
4235static bool validateAndIncrementResources(layer_data *my_data, GLOBAL_CB_NODE *pCB) {
4236    bool skip_call = false;
4237
4238    pCB->in_use.fetch_add(1);
4239    my_data->globalInFlightCmdBuffers.insert(pCB->commandBuffer);
4240
4241    for (auto drawDataElement : pCB->drawData) {
4242        for (auto buffer : drawDataElement.buffers) {
4243            auto buffer_node = getBufferNode(my_data, buffer);
4244            if (!buffer_node) {
4245                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4246                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4247                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4248            } else {
4249                buffer_node->in_use.fetch_add(1);
4250            }
4251        }
4252    }
4253    for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4254        for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4255            if (!my_data->setMap.count(set->GetSet())) {
4256                skip_call |=
4257                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
4258                            (uint64_t)(set), __LINE__, DRAWSTATE_INVALID_DESCRIPTOR_SET, "DS",
4259                            "Cannot submit cmd buffer using deleted descriptor set 0x%" PRIx64 ".", (uint64_t)(set));
4260            } else {
4261                set->in_use.fetch_add(1);
4262            }
4263        }
4264    }
4265    for (auto event : pCB->events) {
4266        auto eventNode = my_data->eventMap.find(event);
4267        if (eventNode == my_data->eventMap.end()) {
4268            skip_call |=
4269                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4270                        reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4271                        "Cannot submit cmd buffer using deleted event 0x%" PRIx64 ".", reinterpret_cast<uint64_t &>(event));
4272        } else {
4273            eventNode->second.in_use.fetch_add(1);
4274        }
4275    }
4276    for (auto event : pCB->writeEventsBeforeWait) {
4277        auto eventNode = my_data->eventMap.find(event);
4278        // Guard against a deleted event, mirroring the check in decrementResources()
        if (eventNode != my_data->eventMap.end()) {
            eventNode->second.write_in_use++;
        }
4279    }
4280    return skip_call;
4281}
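// These increments are the submit-side half of a reference-count pair: each
// fetch_add(1) here is matched by a fetch_sub(1) in decrementResources() below
// once the fence guarding the submission retires, e.g.:
//
//   validateAndIncrementResources(dev_data, pCB); // at vkQueueSubmit() time
//   // ... GPU executes, fence signals ...
//   decrementResources(dev_data, &submission);    // during fence retirement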
4282
4283// Note: This function assumes that the global lock is held by the calling
4284// thread.
4285static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4286    bool skip_call = false;
4287    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4288    if (pCB) {
4289        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4290            for (auto event : queryEventsPair.second) {
4291                if (my_data->eventMap[event].needsSignaled) {
4292                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4293                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4294                                         "Cannot get query results on queryPool 0x%" PRIx64
4295                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4296                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4297                }
4298            }
4299        }
4300    }
4301    return skip_call;
4302}
4303// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4304static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4305    // Note: a later submission may re-insert the CB into the global in-flight set
4306    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    if (!pCB) return; // getCBNode() has already logged the invalid handle
4307    pCB->in_use.fetch_sub(1);
4308    if (!pCB->in_use.load()) {
4309        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4310    }
4311}
4312
4313static void decrementResources(layer_data *my_data, CB_SUBMISSION *submission) {
4314    for (auto cb : submission->cbs) {
4315        auto pCB = getCBNode(my_data, cb);
4316        for (auto drawDataElement : pCB->drawData) {
4317            for (auto buffer : drawDataElement.buffers) {
4318                auto buffer_node = getBufferNode(my_data, buffer);
4319                if (buffer_node) {
4320                    buffer_node->in_use.fetch_sub(1);
4321                }
4322            }
4323        }
4324        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4325            for (auto set : pCB->lastBound[i].uniqueBoundSets) {
4326                set->in_use.fetch_sub(1);
4327            }
4328        }
4329        for (auto event : pCB->events) {
4330            auto eventNode = my_data->eventMap.find(event);
4331            if (eventNode != my_data->eventMap.end()) {
4332                eventNode->second.in_use.fetch_sub(1);
4333            }
4334        }
4335        for (auto event : pCB->writeEventsBeforeWait) {
4336            auto eventNode = my_data->eventMap.find(event);
4337            if (eventNode != my_data->eventMap.end()) {
4338                eventNode->second.write_in_use--;
4339            }
4340        }
4341        for (auto queryStatePair : pCB->queryToStateMap) {
4342            my_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4343        }
4344        for (auto eventStagePair : pCB->eventToStageMap) {
4345            my_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4346        }
4347    }
4348
4349    for (auto semaphore : submission->semaphores) {
4350        auto pSemaphore = getSemaphoreNode(my_data, semaphore);
4351        if (pSemaphore) {
4352            pSemaphore->in_use.fetch_sub(1);
4353        }
4354    }
4355}
4356// For fenceCount fences in pFences, mark fence signaled, decrement in_use, and call
4357//  decrementResources for all priorFences and cmdBuffers associated with fence.
4358static bool decrementResources(layer_data *my_data, uint32_t fenceCount, const VkFence *pFences) {
4359    bool skip_call = false;
4360    std::vector<std::pair<VkFence, FENCE_NODE *>> fence_pairs;
4361    for (uint32_t i = 0; i < fenceCount; ++i) {
4362        auto pFence = getFenceNode(my_data, pFences[i]);
4363        if (!pFence || pFence->state != FENCE_INFLIGHT)
4364            continue;
4365
4366        fence_pairs.emplace_back(pFences[i], pFence);
4367        pFence->state = FENCE_RETIRED;
4368
4369        decrementResources(my_data, static_cast<uint32_t>(pFence->priorFences.size()),
4370                           pFence->priorFences.data());
4371        for (auto & submission : pFence->submissions) {
4372            decrementResources(my_data, &submission);
4373            for (auto cb : submission.cbs) {
4374                skip_call |= cleanInFlightCmdBuffer(my_data, cb);
4375                removeInFlightCmdBuffer(my_data, cb);
4376            }
4377        }
4378        pFence->submissions.clear();
4379        pFence->priorFences.clear();
4380    }
4381    for (auto fence_pair : fence_pairs) {
4382        for (auto queue : fence_pair.second->queues) {
4383            auto pQueue = getQueueNode(my_data, queue);
4384            if (pQueue) {
4385                auto last_fence_data =
4386                    std::find(pQueue->lastFences.begin(), pQueue->lastFences.end(), fence_pair.first);
4387                if (last_fence_data != pQueue->lastFences.end())
4388                    pQueue->lastFences.erase(last_fence_data);
4389            }
4390        }
4391        for (auto& fence_data : my_data->fenceMap) {
            auto prior_fence_data =
                std::find(fence_data.second.priorFences.begin(), fence_data.second.priorFences.end(), fence_pair.first);
            if (prior_fence_data != fence_data.second.priorFences.end())
                fence_data.second.priorFences.erase(prior_fence_data);
4396        }
4397    }
4398    return skip_call;
4399}
4400// Decrement in_use for all outstanding cmd buffers that were submitted on this queue
4401static bool decrementResources(layer_data *my_data, VkQueue queue) {
4402    bool skip_call = false;
4403    auto queue_data = my_data->queueMap.find(queue);
4404    if (queue_data != my_data->queueMap.end()) {
4405        for (auto & submission : queue_data->second.untrackedSubmissions) {
4406            decrementResources(my_data, &submission);
4407            for (auto cb : submission.cbs) {
4408                skip_call |= cleanInFlightCmdBuffer(my_data, cb);
4409                removeInFlightCmdBuffer(my_data, cb);
4410            }
4411        }
4412        queue_data->second.untrackedSubmissions.clear();
4413        skip_call |= decrementResources(my_data, static_cast<uint32_t>(queue_data->second.lastFences.size()),
4414                                        queue_data->second.lastFences.data());
4415    }
4416    return skip_call;
4417}
4418
4419// This function merges command buffer tracking between queues when there is a semaphore dependency
// between them (see below for details as to how tracking works). When this happens, the prior
// fences and any untracked command buffers from the signaling queue are merged into the waiting
// queue's tracking state.
4423static void updateTrackedCommandBuffers(layer_data *dev_data, VkQueue queue, VkQueue other_queue, VkFence fence) {
4424    if (queue == other_queue) {
4425        return;
4426    }
4427    auto pQueue = getQueueNode(dev_data, queue);
4428    auto pOtherQueue = getQueueNode(dev_data, other_queue);
4429    if (!pQueue || !pOtherQueue) {
4430        return;
4431    }
4432    for (auto fenceInner : pOtherQueue->lastFences) {
4433        pQueue->lastFences.push_back(fenceInner);
4434        auto pFenceInner = getFenceNode(dev_data, fenceInner);
4435        if (pFenceInner)
4436            pFenceInner->queues.insert(other_queue);
4437    }
4438    // TODO: Stealing the untracked CBs out of the signaling queue isn't really
4439    // correct. A subsequent submission + wait, or a QWI on that queue, or
4440    // another semaphore dependency to a third queue may /all/ provide
4441    // suitable proof that the work we're stealing here has completed on the
4442    // device, but we've lost that information by moving the tracking between
4443    // queues.
4444    auto pFence = getFenceNode(dev_data, fence);
4445    if (pFence) {
4446        for (auto submission : pOtherQueue->untrackedSubmissions) {
4447            pFence->submissions.push_back(submission);
4448        }
4449        pOtherQueue->untrackedSubmissions.clear();
4450    } else {
4451        for (auto submission : pOtherQueue->untrackedSubmissions) {
4452            pQueue->untrackedSubmissions.push_back(submission);
4453        }
4454        pOtherQueue->untrackedSubmissions.clear();
4455    }
4456    for (auto eventStagePair : pOtherQueue->eventToStageMap) {
4457        pQueue->eventToStageMap[eventStagePair.first] = eventStagePair.second;
4458    }
4459    for (auto queryStatePair : pOtherQueue->queryToStateMap) {
4460        pQueue->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4461    }
4462}
4463
4464// This is the core function for tracking command buffers. There are two primary ways command
4465// buffers are tracked. When submitted they are stored in the command buffer list associated
4466// with a fence or the untracked command buffer list associated with a queue if no fence is used.
4467// Each queue also stores the last fence that was submitted onto the queue. This allows us to
4468// create a linked list of fences and their associated command buffers so if one fence is
4469// waited on, prior fences on that queue are also considered to have been waited on. When a fence is
4470// waited on (either via a queue, device or fence), we free the cmd buffers for that fence and
4471// recursively call with the prior fences.
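//
// For example, given this sequence on a single queue q (illustrative handles):
//     vkQueueSubmit(q, 1, &submitA, VK_NULL_HANDLE); // A lands in q's untrackedSubmissions
//     vkQueueSubmit(q, 1, &submitB, fence1);         // fence1 takes A and B; q.lastFences = {fence1}
//     vkQueueSubmit(q, 1, &submitC, fence2);         // fence2 takes C; fence2.priorFences = {fence1}
// Waiting on fence2 retires fence2, recursively retires fence1, and thereby releases the
// resources of submissions A, B and C.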
4472
4473
4474// Submit a fence to a queue, delimiting previous fences and previous untracked
4475// work by it.
static void SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence) {
4479    assert(!pFence->priorFences.size());
4480    assert(!pFence->submissions.size());
4481
4482    std::swap(pFence->priorFences, pQueue->lastFences);
4483    std::swap(pFence->submissions, pQueue->untrackedSubmissions);
4484
4485    pFence->queues.insert(pQueue->queue);
4486    pFence->state = FENCE_INFLIGHT;
4487
4488    pQueue->lastFences.push_back(pFence->fence);
4489}
4490
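// A command buffer that is still in flight may only be submitted again if it was begun with
// VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT.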
4491static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4492    bool skip_call = false;
4493    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4494        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4495        skip_call |=
4496            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4497                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4498                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4499                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4500    }
4501    return skip_call;
4502}
4503
4504static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4505    bool skipCall = false;
4506    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4507    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4508        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4509                            __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4510                            "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                            "set, but has been submitted %" PRIuLEAST64 " times.",
4512                            (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4513    }
4514    // Validate that cmd buffers have been updated
4515    if (CB_RECORDED != pCB->state) {
4516        if (CB_INVALID == pCB->state) {
4517            // Inform app of reason CB invalid
4518            for (auto obj : pCB->broken_bindings) {
4519                const char *type_str = object_type_to_string(obj.type);
                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
4521                const char *cause_str =
4522                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4523
4524                skipCall |=
4525                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4526                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4527                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
4528                            " was %s.",
4529                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
4530            }
4531        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4532            skipCall |=
4533                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4534                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4535                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
4536                        (uint64_t)(pCB->commandBuffer));
4537        }
4538    }
4539    return skipCall;
4540}
4541
4542static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4543    // Track in-use for resources off of primary and any secondary CBs
4544    bool skipCall = false;
4545
4546    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
4547    // on device
4548    skipCall |= validateCommandBufferSimultaneousUse(dev_data, pCB);
4549
4550    skipCall |= validateAndIncrementResources(dev_data, pCB);
4551
4552    if (!pCB->secondaryCommandBuffers.empty()) {
4553        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
4554            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
4555            skipCall |= validateAndIncrementResources(dev_data, pSubCB);
4556            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
4557                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skipCall |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            0, __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                            "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
                            " but that buffer has subsequently been bound to "
                            "primary cmd buffer 0x%" PRIxLEAST64
                            " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                            reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                            reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
4566            }
4567        }
4568    }
4569
4570    skipCall |= validateCommandBufferState(dev_data, pCB);
4571
4572    return skipCall;
4573}
4574
static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
4578    bool skipCall = false;
4579
4580    if (pFence) {
4581        if (pFence->state == FENCE_INFLIGHT) {
4582            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4583                                (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4584                                "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
4588            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4589                                reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4590                                "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
4591                                reinterpret_cast<uint64_t &>(pFence->fence));
4592        }
4593    }
4594
4595    return skipCall;
4596}
4597
4598
4599VKAPI_ATTR VkResult VKAPI_CALL
4600QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
4601    bool skipCall = false;
4602    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4603    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4604    std::unique_lock<std::mutex> lock(global_lock);
4605
4606    auto pQueue = getQueueNode(dev_data, queue);
4607    auto pFence = getFenceNode(dev_data, fence);
4608    skipCall |= ValidateFenceForSubmit(dev_data, pFence);
4609
4610    if (skipCall) {
4611        return VK_ERROR_VALIDATION_FAILED_EXT;
4612    }
4613
4614    // TODO : Review these old print functions and clean up as appropriate
4615    print_mem_list(dev_data);
4616    printCBList(dev_data);
4617
4618    // Mark the fence in-use.
4619    if (pFence) {
4620        SubmitFence(pQueue, pFence);
4621    }
4622
4623    // If a fence is supplied, all the command buffers for this call will be
4624    // delimited by that fence. Otherwise, they go in the untracked portion of
4625    // the queue, and may end up being delimited by a fence supplied in a
4626    // subsequent submission.
4627    auto & submitTarget = pFence ? pFence->submissions : pQueue->untrackedSubmissions;
4628
4629    // Now verify each individual submit
4630    std::unordered_set<VkQueue> processed_other_queues;
4631    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
4632        const VkSubmitInfo *submit = &pSubmits[submit_idx];
4633        vector<VkSemaphore> semaphoreList;
4634        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
4635            VkSemaphore semaphore = submit->pWaitSemaphores[i];
4636            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4637            semaphoreList.push_back(semaphore);
4638            if (pSemaphore) {
4639                if (pSemaphore->signaled) {
4640                    pSemaphore->signaled = false;
4641                    pSemaphore->in_use.fetch_add(1);
4642                } else {
4643                    skipCall |=
4644                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4645                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4646                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
4647                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
4648                }
4649                VkQueue other_queue = pSemaphore->queue;
4650                if (other_queue != VK_NULL_HANDLE && !processed_other_queues.count(other_queue)) {
4651                    updateTrackedCommandBuffers(dev_data, queue, other_queue, fence);
4652                    processed_other_queues.insert(other_queue);
4653                }
4654            }
4655        }
4656        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
4657            VkSemaphore semaphore = submit->pSignalSemaphores[i];
4658            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4659            if (pSemaphore) {
4660                semaphoreList.push_back(semaphore);
4661                if (pSemaphore->signaled) {
4662                    skipCall |=
4663                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4664                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
4665                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
4666                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
4667                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
4668                                reinterpret_cast<uint64_t &>(pSemaphore->queue));
4669                } else {
4670                    pSemaphore->signaled = true;
4671                    pSemaphore->queue = queue;
4672                    pSemaphore->in_use.fetch_add(1);
4673                }
4674            }
4675        }
4676
4677        std::vector<VkCommandBuffer> cbs;
4678
4679        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
4680            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            if (pCBNode) {
                skipCall |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
4683                cbs.push_back(submit->pCommandBuffers[i]);
4684                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
4685                    cbs.push_back(secondaryCmdBuffer);
4686                }
4687
4688                pCBNode->submitCount++; // increment submit count
4689                skipCall |= validatePrimaryCommandBufferState(dev_data, pCBNode);
4690                // Call submit-time functions to validate/update state
4691                for (auto &function : pCBNode->validate_functions) {
4692                    skipCall |= function();
4693                }
4694                for (auto &function : pCBNode->eventUpdates) {
4695                    skipCall |= function(queue);
4696                }
4697                for (auto &function : pCBNode->queryUpdates) {
4698                    skipCall |= function(queue);
4699                }
4700            }
4701        }
4702
4703        submitTarget.emplace_back(cbs, semaphoreList);
4704    }
4705    lock.unlock();
4706    if (!skipCall)
4707        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
4708
4709    return result;
4710}
4711
4712VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
4713                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
4714    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4715    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
4716    // TODO : Track allocations and overall size here
4717    std::lock_guard<std::mutex> lock(global_lock);
4718    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
4719    print_mem_list(my_data);
4720    return result;
4721}
4722
4723VKAPI_ATTR void VKAPI_CALL
4724FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
4725    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4726
4727    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
4728    // Before freeing a memory object, an application must ensure the memory object is no longer
4729    // in use by the device—for example by command buffers queued for execution. The memory need
4730    // not yet be unbound from all images and buffers, but any further use of those images or
4731    // buffers (on host or device) for anything other than destroying those objects will result in
4732    // undefined behavior.
4733
4734    std::unique_lock<std::mutex> lock(global_lock);
4735    freeMemObjInfo(my_data, device, mem, false);
4736    print_mem_list(my_data);
4737    printCBList(my_data);
4738    lock.unlock();
4739    my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
4740}
4741
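// Validate a vkMapMemory request against the tracked memory object: the mapped size must be
// non-zero, the object must not already be mapped, and offset (plus size, unless VK_WHOLE_SIZE
// is used) must fall within the object's allocationSize.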
4742static bool validateMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4743    bool skipCall = false;
4744
4745    if (size == 0) {
4746        skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4747                           (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4748                           "VkMapMemory: Attempting to map memory range of size zero");
4749    }
4750
4751    auto mem_element = my_data->memObjMap.find(mem);
4752    if (mem_element != my_data->memObjMap.end()) {
4753        auto mem_info = mem_element->second.get();
4754        // It is an application error to call VkMapMemory on an object that is already mapped
4755        if (mem_info->memRange.size != 0) {
4756            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4757                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4758                               "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
4759        }
4760
4761        // Validate that offset + size is within object's allocationSize
4762        if (size == VK_WHOLE_SIZE) {
4763            if (offset >= mem_info->allocInfo.allocationSize) {
4764                skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4765                                   VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
4766                                   "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
4767                                          " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
4768                                   offset, mem_info->allocInfo.allocationSize, mem_info->allocInfo.allocationSize);
4769            }
4770        } else {
4771            if ((offset + size) > mem_info->allocInfo.allocationSize) {
4772                skipCall =
4773                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4774                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4775                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
4776                            size + offset, mem_info->allocInfo.allocationSize);
4777            }
4778        }
4779    }
4780    return skipCall;
4781}
4782
4783static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
4784    auto mem_info = getMemObjInfo(my_data, mem);
4785    if (mem_info) {
4786        mem_info->memRange.offset = offset;
4787        mem_info->memRange.size = size;
4788    }
4789}
4790
4791static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
4792    bool skipCall = false;
4793    auto mem_info = getMemObjInfo(my_data, mem);
4794    if (mem_info) {
4795        if (!mem_info->memRange.size) {
4796            // Valid Usage: memory must currently be mapped
4797            skipCall = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4798                               (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
4799                               "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
4800        }
4801        mem_info->memRange.size = 0;
4802        if (mem_info->pData) {
4803            free(mem_info->pData);
4804            mem_info->pData = 0;
4805        }
4806    }
4807    return skipCall;
4808}
4809
4810static char NoncoherentMemoryFillValue = 0xb;
4811
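// For mappings of non-host-coherent memory, shadow the driver mapping: allocate a block twice
// the mapped size, fill it with NoncoherentMemoryFillValue, and hand the app a pointer offset
// half the mapped size into it (e.g. mapping 0x1000 bytes allocates 0x2000 shadow bytes and
// returns shadow + 0x800). Writes outside the mapped range can later be detected by checking
// the fill pattern in the surrounding guard bands.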
4812static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize size, void **ppData) {
4813    auto mem_info = getMemObjInfo(dev_data, mem);
4814    if (mem_info) {
4815        mem_info->pDriverData = *ppData;
4816        uint32_t index = mem_info->allocInfo.memoryTypeIndex;
4817        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
4818            mem_info->pData = 0;
4819        } else {
4820            if (size == VK_WHOLE_SIZE) {
4821                size = mem_info->allocInfo.allocationSize;
4822            }
            size_t convSize = (size_t)(size);
            mem_info->pData = malloc(2 * convSize);
            if (mem_info->pData) {
                memset(mem_info->pData, NoncoherentMemoryFillValue, 2 * convSize);
                *ppData = static_cast<char *>(mem_info->pData) + (convSize / 2);
            }
4827        }
4828    }
4829}
4830// Verify that state for fence being waited on is appropriate. That is,
//  a fence being waited on should not already be signaled and
4832//  it should have been submitted on a queue or during acquire next image
4833static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
4834    bool skipCall = false;
4835
4836    auto pFence = getFenceNode(dev_data, fence);
4837    if (pFence) {
4838        if (pFence->state == FENCE_UNSIGNALED) {
4839            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4840                                reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
4841                                "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
4842                                "acquire next image.",
4843                                apiCall, reinterpret_cast<uint64_t &>(fence));
4844        }
4845    }
4846    return skipCall;
4847}
4848
4849VKAPI_ATTR VkResult VKAPI_CALL
4850WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
4851    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4852    bool skip_call = false;
4853    // Verify fence status of submitted fences
4854    std::unique_lock<std::mutex> lock(global_lock);
4855    for (uint32_t i = 0; i < fenceCount; i++) {
4856        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
4857    }
4858    lock.unlock();
4859    if (skip_call)
4860        return VK_ERROR_VALIDATION_FAILED_EXT;
4861
4862    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);
4863
4864    if (result == VK_SUCCESS) {
4865        lock.lock();
4866        // When we know that all fences are complete we can clean/remove their CBs
4867        if (waitAll || fenceCount == 1) {
4868            skip_call |= decrementResources(dev_data, fenceCount, pFences);
4869        }
4870        // NOTE : Alternate case not handled here is when some fences have completed. In
4871        //  this case for app to guarantee which fences completed it will have to call
4872        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
4873        lock.unlock();
4874    }
4875    if (skip_call)
4876        return VK_ERROR_VALIDATION_FAILED_EXT;
4877    return result;
4878}
4879
4880VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
4881    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4882    bool skipCall = false;
4883    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
4884    std::unique_lock<std::mutex> lock(global_lock);
4885    skipCall = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
4886    lock.unlock();
4887
4888    if (skipCall)
4889        return result;
4890
4891    result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
4893    lock.lock();
4894    if (result == VK_SUCCESS) {
4895        skipCall |= decrementResources(dev_data, 1, &fence);
4896    }
4897    lock.unlock();
    if (skipCall)
4899        return VK_ERROR_VALIDATION_FAILED_EXT;
4900    return result;
4901}
4902
4903VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex,
4904                                                            VkQueue *pQueue) {
4905    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4906    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
4907    std::lock_guard<std::mutex> lock(global_lock);
4908
4909    // Add queue to tracking set only if it is new
4910    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
4912        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
4913        pQNode->queue = *pQueue;
4914        pQNode->device = device;
4915    }
4916}
4917
4918VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
4919    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
4920    bool skip_call = false;
    // decrementResources walks shared queue/CB state, so take the global lock here, as
    // DeviceWaitIdle does
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call |= decrementResources(dev_data, queue);
    lock.unlock();
4922    if (skip_call)
4923        return VK_ERROR_VALIDATION_FAILED_EXT;
4924    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
4925    return result;
4926}
4927
4928VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
4929    bool skip_call = false;
4930    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4931    std::unique_lock<std::mutex> lock(global_lock);
4932    for (auto queue : dev_data->queues) {
4933        skip_call |= decrementResources(dev_data, queue);
4934    }
4935    dev_data->globalInFlightCmdBuffers.clear();
4936    lock.unlock();
4937    if (skip_call)
4938        return VK_ERROR_VALIDATION_FAILED_EXT;
4939    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
4940    return result;
4941}
4942
4943VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
4944    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4945    bool skipCall = false;
4946    std::unique_lock<std::mutex> lock(global_lock);
4947    auto fence_pair = dev_data->fenceMap.find(fence);
4948    if (fence_pair != dev_data->fenceMap.end()) {
4949        if (fence_pair->second.state == FENCE_INFLIGHT) {
4950            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
4951                                (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
4952                                "Fence 0x%" PRIx64 " is in use.", (uint64_t)(fence));
4953        }
4954        dev_data->fenceMap.erase(fence_pair);
4955    }
4956    lock.unlock();
4957
4958    if (!skipCall)
4959        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
4960}
4961
4962VKAPI_ATTR void VKAPI_CALL
4963DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
4964    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4965    dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
4966    std::lock_guard<std::mutex> lock(global_lock);
4967    auto item = dev_data->semaphoreMap.find(semaphore);
4968    if (item != dev_data->semaphoreMap.end()) {
4969        if (item->second.in_use.load()) {
4970            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
4971                    reinterpret_cast<uint64_t &>(semaphore), __LINE__, DRAWSTATE_INVALID_SEMAPHORE, "DS",
4972                    "Cannot delete semaphore 0x%" PRIx64 " which is in use.", reinterpret_cast<uint64_t &>(semaphore));
4973        }
4974        dev_data->semaphoreMap.erase(semaphore);
4975    }
4976    // TODO : Clean up any internal data structures using this obj.
4977}
4978
4979VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
4980    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4981    bool skip_call = false;
4982    std::unique_lock<std::mutex> lock(global_lock);
4983    auto event_data = dev_data->eventMap.find(event);
4984    if (event_data != dev_data->eventMap.end()) {
4985        if (event_data->second.in_use.load()) {
4986            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
4988                reinterpret_cast<uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
4989                "Cannot delete event 0x%" PRIx64 " which is in use by a command buffer.", reinterpret_cast<uint64_t &>(event));
4990        }
4991        dev_data->eventMap.erase(event_data);
4992    }
4993    lock.unlock();
4994    if (!skip_call)
4995        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
4996    // TODO : Clean up any internal data structures using this obj.
4997}
4998
4999VKAPI_ATTR void VKAPI_CALL
5000DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5001    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5002        ->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5003    // TODO : Clean up any internal data structures using this obj.
5004}
5005
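// Validate each requested query against both the device-level query state and the queries
// recorded in still-in-flight command buffers. A query may be read while in flight only if it
// is available and its reset was guarded by events the app waited on, or if it is unavailable
// but WAIT or PARTIAL results were requested and an in-flight CB will make it available.
// Reading any other in-flight, unavailable, or never-written query is flagged as an error.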
5006VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5007                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5008                                                   VkQueryResultFlags flags) {
5009    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5010    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5011    std::unique_lock<std::mutex> lock(global_lock);
5012    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5013        auto pCB = getCBNode(dev_data, cmdBuffer);
5014        for (auto queryStatePair : pCB->queryToStateMap) {
5015            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5016        }
5017    }
5018    bool skip_call = false;
5019    for (uint32_t i = 0; i < queryCount; ++i) {
5020        QueryObject query = {queryPool, firstQuery + i};
5021        auto queryElement = queriesInFlight.find(query);
5022        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5023        if (queryToStateElement != dev_data->queryToStateMap.end()) {
5024            // Available and in flight
5025            if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5026                queryToStateElement->second) {
5027                for (auto cmdBuffer : queryElement->second) {
5028                    auto pCB = getCBNode(dev_data, cmdBuffer);
5029                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5030                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5031                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5032                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5033                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5034                                             (uint64_t)(queryPool), firstQuery + i);
5035                    } else {
5036                        for (auto event : queryEventElement->second) {
5037                            dev_data->eventMap[event].needsSignaled = true;
5038                        }
5039                    }
5040                }
5041                // Unavailable and in flight
5042            } else if (queryElement != queriesInFlight.end() && queryToStateElement != dev_data->queryToStateMap.end() &&
5043                       !queryToStateElement->second) {
5044                // TODO : Can there be the same query in use by multiple command buffers in flight?
5045                bool make_available = false;
5046                for (auto cmdBuffer : queryElement->second) {
5047                    auto pCB = getCBNode(dev_data, cmdBuffer);
5048                    make_available |= pCB->queryToStateMap[query];
5049                }
5050                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5051                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5052                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5053                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5054                                         (uint64_t)(queryPool), firstQuery + i);
5055                }
5056                // Unavailable
5057            } else if (queryToStateElement != dev_data->queryToStateMap.end() && !queryToStateElement->second) {
5058                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5059                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5060                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5061                                     (uint64_t)(queryPool), firstQuery + i);
                // Uninitialized
5063            } else if (queryToStateElement == dev_data->queryToStateMap.end()) {
5064                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5065                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5066                                     "Cannot get query results on queryPool 0x%" PRIx64
5067                                     " with index %d as data has not been collected for this index.",
5068                                     (uint64_t)(queryPool), firstQuery + i);
5069            }
5070        }
5071    }
5072    lock.unlock();
5073    if (skip_call)
5074        return VK_ERROR_VALIDATION_FAILED_EXT;
5075    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5076                                                                flags);
5077}
5078
5079static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5080    bool skip_call = false;
5081    auto buffer_node = getBufferNode(my_data, buffer);
5082    if (!buffer_node) {
5083        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5084                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5085                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5086    } else {
5087        if (buffer_node->in_use.load()) {
5088            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5089                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5090                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5091        }
5092    }
5093    return skip_call;
5094}
5095
5096static bool print_memory_range_error(layer_data *dev_data, const uint64_t object_handle, const uint64_t other_handle,
5097                                     VkDebugReportObjectTypeEXT object_type) {
5098    if (object_type == VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT) {
5099        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5100                       MEMTRACK_INVALID_ALIASING, "MEM", "Buffer 0x%" PRIx64 " is aliased with image 0x%" PRIx64, object_handle,
5101                       other_handle);
5102    } else {
5103        return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, 0,
5104                       MEMTRACK_INVALID_ALIASING, "MEM", "Image 0x%" PRIx64 " is aliased with buffer 0x%" PRIx64, object_handle,
5105                       other_handle);
5106    }
5107}
5108
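// Check a newly bound range against the existing ranges of the other resource type on this
// allocation. Both endpoints are rounded down to a bufferImageGranularity boundary before the
// overlap test, so resources that merely share a granularity page are also flagged. For example,
// with bufferImageGranularity = 0x400, ranges [0x0, 0x3FF] and [0x400, 0x7FF] do not alias,
// but [0x0, 0x4FF] and [0x400, 0x7FF] do.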
5109static bool validate_memory_range(layer_data *dev_data, const vector<MEMORY_RANGE> &ranges, const MEMORY_RANGE &new_range,
5110                                  VkDebugReportObjectTypeEXT object_type) {
5111    bool skip_call = false;
5112
5113    for (auto range : ranges) {
5114        if ((range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) <
5115            (new_range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5116            continue;
5117        if ((range.start & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)) >
5118            (new_range.end & ~(dev_data->phys_dev_properties.properties.limits.bufferImageGranularity - 1)))
5119            continue;
5120        skip_call |= print_memory_range_error(dev_data, new_range.handle, range.handle, object_type);
5121    }
5122    return skip_call;
5123}
5124
5125static MEMORY_RANGE insert_memory_ranges(uint64_t handle, VkDeviceMemory mem, VkDeviceSize memoryOffset,
5126                                         VkMemoryRequirements memRequirements, vector<MEMORY_RANGE> &ranges) {
5127    MEMORY_RANGE range;
5128    range.handle = handle;
5129    range.memory = mem;
5130    range.start = memoryOffset;
5131    range.end = memoryOffset + memRequirements.size - 1;
5132    ranges.push_back(range);
5133    return range;
5134}
5135
5136static void remove_memory_ranges(uint64_t handle, VkDeviceMemory mem, vector<MEMORY_RANGE> &ranges) {
5137    for (uint32_t item = 0; item < ranges.size(); item++) {
5138        if ((ranges[item].handle == handle) && (ranges[item].memory == mem)) {
5139            ranges.erase(ranges.begin() + item);
5140            break;
5141        }
5142    }
5143}
5144
5145VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5146                                         const VkAllocationCallbacks *pAllocator) {
5147    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
5151        lock.unlock();
5152        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5153        lock.lock();
5154    }
5155    // Clean up memory binding and range information for buffer
5156    auto buff_node = getBufferNode(dev_data, buffer);
5157    if (buff_node) {
5158        auto mem_info = getMemObjInfo(dev_data, buff_node->mem);
5159        if (mem_info) {
5160            remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), buff_node->mem, mem_info->bufferRanges);
5161        }
5162        clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5163        dev_data->bufferMap.erase(buff_node->buffer);
5164    }
5165}
5166
5167VKAPI_ATTR void VKAPI_CALL
5168DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5169    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5170    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5171    std::lock_guard<std::mutex> lock(global_lock);
5172    auto item = dev_data->bufferViewMap.find(bufferView);
5173    if (item != dev_data->bufferViewMap.end()) {
5174        dev_data->bufferViewMap.erase(item);
5175    }
5176}
5177
5178VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5179    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5184
5185    std::lock_guard<std::mutex> lock(global_lock);
5186    const auto &imageEntry = dev_data->imageMap.find(image);
5187    if (imageEntry != dev_data->imageMap.end()) {
5188        // Clean up memory mapping, bindings and range references for image
5189        auto mem_info = getMemObjInfo(dev_data, imageEntry->second.get()->mem);
5190        if (mem_info) {
5191            remove_memory_ranges(reinterpret_cast<uint64_t &>(image), imageEntry->second.get()->mem, mem_info->imageRanges);
5192            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
5193            mem_info->image = VK_NULL_HANDLE;
5194        }
5195        // Remove image from imageMap
5196        dev_data->imageMap.erase(imageEntry);
5197    }
5198    const auto& subEntry = dev_data->imageSubresourceMap.find(image);
5199    if (subEntry != dev_data->imageSubresourceMap.end()) {
5200        for (const auto& pair : subEntry->second) {
5201            dev_data->imageLayoutMap.erase(pair);
5202        }
5203        dev_data->imageSubresourceMap.erase(subEntry);
5204    }
5205}
5206
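// Verify that the memory type chosen when this memory was allocated is one of the types
// permitted by the resource's VkMemoryRequirements::memoryTypeBits.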
5207static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
5208                                  const char *funcName) {
5209    bool skip_call = false;
5210    if (((1 << mem_info->allocInfo.memoryTypeIndex) & memory_type_bits) == 0) {
5211        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5212                            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5213                            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5214                            "type (0x%X) of this memory object 0x%" PRIx64 ".",
5215                            funcName, memory_type_bits, mem_info->allocInfo.memoryTypeIndex,
5216                            reinterpret_cast<const uint64_t &>(mem_info->mem));
5217    }
5218    return skip_call;
5219}
5220
5221VKAPI_ATTR VkResult VKAPI_CALL
5222BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5223    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5224    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5225    std::unique_lock<std::mutex> lock(global_lock);
5226    // Track objects tied to memory
5227    uint64_t buffer_handle = (uint64_t)(buffer);
5228    bool skipCall =
5229        set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5230    auto buffer_node = getBufferNode(dev_data, buffer);
5231    if (buffer_node) {
5232        buffer_node->mem = mem;
5233        VkMemoryRequirements memRequirements;
5234        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5235
5236        // Track and validate bound memory range information
5237        auto mem_info = getMemObjInfo(dev_data, mem);
5238        if (mem_info) {
5239            const MEMORY_RANGE range =
5240                insert_memory_ranges(buffer_handle, mem, memoryOffset, memRequirements, mem_info->bufferRanges);
5241            skipCall |= validate_memory_range(dev_data, mem_info->imageRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5242            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5243        }
5244
5245        // Validate memory requirements alignment
5246        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5247            skipCall |=
5248                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5249                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5250                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5251                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
5252                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
5253                        memoryOffset, memRequirements.alignment);
5254        }
5255        // Validate device limits alignments
5256        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
5257        if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
5258            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
5259                skipCall |=
5260                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5261                            0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5262                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5263                            "device limit minTexelBufferOffsetAlignment 0x%" PRIxLEAST64,
5264                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment);
5265            }
5266        }
5267        if (usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) {
5268            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) !=
5269                0) {
5270                skipCall |=
5271                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5272                            0, __LINE__, DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
5273                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5274                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
5275                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
5276            }
5277        }
5278        if (usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) {
5279            if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) !=
5280                0) {
5281                skipCall |=
5282                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5283                            0, __LINE__, DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
5284                            "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5285                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
5286                            memoryOffset, dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
5287            }
5288        }
5289    }
5290    print_mem_list(dev_data);
5291    lock.unlock();
5292    if (!skipCall) {
5293        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5294    }
5295    return result;
5296}
5297
5298VKAPI_ATTR void VKAPI_CALL
5299GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5300    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5301    // TODO : What to track here?
5302    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5303    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5304}
5305
5306VKAPI_ATTR void VKAPI_CALL
5307GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5308    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5309    // TODO : What to track here?
5310    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5311    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5312}
5313
5314VKAPI_ATTR void VKAPI_CALL
5315DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5316    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5317        ->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5318    // TODO : Clean up any internal data structures using this obj.
5319}
5320
5321VKAPI_ATTR void VKAPI_CALL
5322DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5323    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5324
5325    std::unique_lock<std::mutex> lock(global_lock);
5326    my_data->shaderModuleMap.erase(shaderModule);
5327    lock.unlock();
5328
5329    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5330}
5331
5332VKAPI_ATTR void VKAPI_CALL
5333DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5334    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5335    // TODO : Clean up any internal data structures using this obj.
5336}
5337
5338VKAPI_ATTR void VKAPI_CALL
5339DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5340    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5341        ->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5342    // TODO : Clean up any internal data structures using this obj.
5343}
5344
5345VKAPI_ATTR void VKAPI_CALL
5346DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5347    get_my_data_ptr(get_dispatch_key(device), layer_data_map)->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5348    // TODO : Clean up any internal data structures using this obj.
5349}
5350
5351VKAPI_ATTR void VKAPI_CALL
5352DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5353    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5354        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5355    // TODO : Clean up any internal data structures using this obj.
5356}
5357
5358VKAPI_ATTR void VKAPI_CALL
5359DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5360    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5361        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5362    // TODO : Clean up any internal data structures using this obj.
5363}
5364// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5365//  For a secondary command buffer, it is only an error if its primary is also in-flight;
5366//  a secondary whose primary is no longer in-flight may safely be reset or freed
5367// This function is only valid at a point when cmdBuffer is being reset or freed
5368static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5369    bool skip_call = false;
5370    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5371        // Primary CB or secondary where primary is also in-flight is an error
5372        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5373            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5374            skip_call |= log_msg(
5375                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5376                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5377                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5378                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5379        }
5380    }
5381    return skip_call;
5382}
5383
5384// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5385static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
5386    bool skip_call = false;
5387    for (auto cmd_buffer : pPool->commandBuffers) {
5388        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5389            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5390        }
5391    }
5392    return skip_call;
5393}
5394
5395static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5396    for (auto cmd_buffer : pPool->commandBuffers) {
5397        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5398    }
5399}
5400
5401VKAPI_ATTR void VKAPI_CALL
5402FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5403    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5404    bool skip_call = false;
5405    std::unique_lock<std::mutex> lock(global_lock);
5406
5407    for (uint32_t i = 0; i < commandBufferCount; i++) {
5408        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
5409        // Verify that the command buffer is not in-flight before allowing the free
5410        if (cb_node) {
5411            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free");
5412        }
5413    }
5414    lock.unlock();
5415
5416    if (skip_call)
5417        return;
5418
5419    dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
5420
5421    lock.lock();
5422    auto pPool = getCommandPoolNode(dev_data, commandPool);
5423    for (uint32_t i = 0; i < commandBufferCount; i++) {
5424        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
5425        // Delete CB information structure, and remove from commandBufferMap
5426        if (cb_node) {
5427            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
5428            // reset prior to delete for data clean-up
5429            resetCB(dev_data, cb_node->commandBuffer);
5430            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
5431            delete cb_node;
5432        }
5433
5434        // Remove commandBuffer reference from commandPoolMap
5435        pPool->commandBuffers.remove(pCommandBuffers[i]);
5436    }
5437    printCBList(dev_data);
5438    lock.unlock();
5439}
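// Illustrative sketch (comment only): the in-flight check above catches frees that race a
// pending submission, e.g.
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkFreeCommandBuffers(device, pool, 1, &cb);   // flagged: "Attempt to free ... in use."
// Waiting on the fence (or vkQueueWaitIdle/vkDeviceWaitIdle) before freeing avoids the
// error. All handles here are hypothetical.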
5440
5441VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
5442                                                 const VkAllocationCallbacks *pAllocator,
5443                                                 VkCommandPool *pCommandPool) {
5444    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5445
5446    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
5447
5448    if (VK_SUCCESS == result) {
5449        std::lock_guard<std::mutex> lock(global_lock);
5450        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
5451        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
5452    }
5453    return result;
5454}
5455
5456VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
5457                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
5458
5459    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5460    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
5461    if (result == VK_SUCCESS) {
5462        std::lock_guard<std::mutex> lock(global_lock);
5463        dev_data->queryPoolMap[*pQueryPool].createInfo = *pCreateInfo;
5464    }
5465    return result;
5466}
5467
5468// Destroy commandPool along with all of the commandBuffers allocated from that pool
5469VKAPI_ATTR void VKAPI_CALL
5470DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
5471    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5472    bool skipCall = false;
5473    std::unique_lock<std::mutex> lock(global_lock);
5474    // Verify that command buffers in pool are complete (not in-flight)
5475    auto pPool = getCommandPoolNode(dev_data, commandPool);
5476    skipCall |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
5477
5478    lock.unlock();
5479
5480    if (skipCall)
5481        return;
5482
5483    dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
5484
5485    lock.lock();
5486    // Remove all of this pool's cmdBuffers from the commandBufferMap, then remove the cmdPool from commandPoolMap
5487    clearCommandBuffersInFlight(dev_data, pPool);
5488    for (auto cb : pPool->commandBuffers) {
5489        clear_cmd_buf_and_mem_references(dev_data, cb);
5490        auto cb_node = getCBNode(dev_data, cb);
5491        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
5492        delete cb_node;                       // delete CB info structure
5493    }
5494    dev_data->commandPoolMap.erase(commandPool);
5495    lock.unlock();
5496}
5497
5498VKAPI_ATTR VkResult VKAPI_CALL
5499ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
5500    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5501    bool skipCall = false;
5502
5503    std::unique_lock<std::mutex> lock(global_lock);
5504    auto pPool = getCommandPoolNode(dev_data, commandPool);
5505    skipCall |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
5506    lock.unlock();
5507
5508    if (skipCall)
5509        return VK_ERROR_VALIDATION_FAILED_EXT;
5510
5511    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
5512
5513    // Reset all of the CBs allocated from this pool
5514    if (VK_SUCCESS == result) {
5515        lock.lock();
5516        clearCommandBuffersInFlight(dev_data, pPool);
5517        for (auto cmdBuffer : pPool->commandBuffers) {
5518            resetCB(dev_data, cmdBuffer);
5519        }
5520        lock.unlock();
5521    }
5522    return result;
5523}
5524
5525VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
5526    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5527    bool skipCall = false;
5528    std::unique_lock<std::mutex> lock(global_lock);
5529    for (uint32_t i = 0; i < fenceCount; ++i) {
5530        auto pFence = getFenceNode(dev_data, pFences[i]);
5531        if (pFence && pFence->state == FENCE_INFLIGHT) {
5532            skipCall |=
5533                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5534                            reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5535                            "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
5536        }
5537    }
5538    lock.unlock();
5539
5540    if (skipCall)
5541        return VK_ERROR_VALIDATION_FAILED_EXT;
5542
5543    VkResult result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
5544
5545    if (result == VK_SUCCESS) {
5546        lock.lock();
5547        for (uint32_t i = 0; i < fenceCount; ++i) {
5548            auto pFence = getFenceNode(dev_data, pFences[i]);
5549            if (pFence) {
5550                pFence->state = FENCE_UNSIGNALED;
5551                // TODO: these should really have already been enforced on
5552                // INFLIGHT->RETIRED transition.
5553                pFence->queues.clear();
5554                pFence->priorFences.clear();
5555            }
5556        }
5557        lock.unlock();
5558    }
5559
5560    return result;
5561}
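// Illustrative sketch (comment only): the FENCE_INFLIGHT check above flags resets of a
// fence that is still attached to a pending submission, e.g.
//     vkQueueSubmit(queue, 1, &submit_info, fence);
//     vkResetFences(device, 1, &fence);             // flagged: "Fence 0x... is in use."
// Calling vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX) first makes the reset
// legal. All handles here are hypothetical.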
5562
5563// For given cb_nodes, invalidate them and track object causing invalidation
5564void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
5565    for (auto cb_node : cb_nodes) {
5566        cb_node->state = CB_INVALID;
5567        cb_node->broken_bindings.push_back(obj);
5568    }
5569}
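// invalidateCommandBuffers is the hook that lets destroy calls such as DestroyFramebuffer
// (below) mark any command buffer referencing the destroyed object as CB_INVALID, so a
// later submit of that command buffer can be flagged against the broken binding rather
// than reaching the driver.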
5570
5571VKAPI_ATTR void VKAPI_CALL
5572DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
5573    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5574    std::unique_lock<std::mutex> lock(global_lock);
5575    auto fb_node = getFramebuffer(dev_data, framebuffer);
5576    if (fb_node) {
5577        invalidateCommandBuffers(fb_node->cb_bindings,
5578                                 {reinterpret_cast<uint64_t &>(fb_node->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT});
5579        dev_data->frameBufferMap.erase(fb_node->framebuffer);
5580    }
5581    lock.unlock();
5582    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
5583}
5584
5585VKAPI_ATTR void VKAPI_CALL
5586DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
5587    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5588    dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
5589    std::lock_guard<std::mutex> lock(global_lock);
5590    dev_data->renderPassMap.erase(renderPass);
5591    // TODO: leaking all the guts of the renderpass node here!
5592}
5593
5594VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
5595                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
5596    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5597
5598    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
5599
5600    if (VK_SUCCESS == result) {
5601        std::lock_guard<std::mutex> lock(global_lock);
5602        // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when we want that data to be valid
5603        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
5604    }
5605    return result;
5606}
5607
5608VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
5609                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
5610    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5611    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
5612    if (VK_SUCCESS == result) {
5613        std::lock_guard<std::mutex> lock(global_lock);
5614        dev_data->bufferViewMap[*pView] = unique_ptr<VkBufferViewCreateInfo>(new VkBufferViewCreateInfo(*pCreateInfo));
5615        // In order to create a valid buffer view, the buffer must have been created with at least one of the
5616        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
5617        validate_buffer_usage_flags(dev_data, pCreateInfo->buffer,
5618                                    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
5619                                    "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
5620    }
5621    return result;
5622}
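// Illustrative sketch (comment only): the usage check above expects the source buffer to
// have been created with a texel-buffer usage bit, e.g.
//     buffer_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
// A view created over a buffer with, say, only VK_BUFFER_USAGE_TRANSFER_SRC_BIT set
// would be reported. 'buffer_ci' is a hypothetical VkBufferCreateInfo.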
5623
5624VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
5625                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
5626    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5627
5628    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
5629
5630    if (VK_SUCCESS == result) {
5631        std::lock_guard<std::mutex> lock(global_lock);
5632        IMAGE_LAYOUT_NODE image_node;
5633        image_node.layout = pCreateInfo->initialLayout;
5634        image_node.format = pCreateInfo->format;
5635        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pCreateInfo))));
5636        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
5637        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
5638        dev_data->imageLayoutMap[subpair] = image_node;
5639    }
5640    return result;
5641}
5642
5643static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
5644    /* expects global_lock to be held by caller */
5645
5646    auto image_node = getImageNode(dev_data, image);
5647    if (image_node) {
5648        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
5649         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
5650         * the actual values.
5651         */
5652        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
5653            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
5654        }
5655
5656        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
5657            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
5658        }
5659    }
5660}
5661
5662// Return the correct layer/level counts if the caller used the special
5663// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
5664static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
5665                                         VkImage image) {
5666    /* expects global_lock to be held by caller */
5667
5668    *levels = range.levelCount;
5669    *layers = range.layerCount;
5670    auto image_node = getImageNode(dev_data, image);
5671    if (image_node) {
5672        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
5673            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
5674        }
5675        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
5676            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
5677        }
5678    }
5679}
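// Worked example (hypothetical values) for the helpers above: for an image created with
// mipLevels = 10 and a subresource range of { baseMipLevel = 3,
// levelCount = VK_REMAINING_MIP_LEVELS }, levelCount resolves to 10 - 3 = 7; array
// layers resolve the same way from arrayLayers and baseArrayLayer.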
5680
5681VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
5682                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
5683    bool skipCall = false;
5684    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5685    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5686    {
5687        // Validate that img has correct usage flags set
5688        std::lock_guard<std::mutex> lock(global_lock);
5689        skipCall |= validate_image_usage_flags(dev_data, pCreateInfo->image,
5690                VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT |
5691                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5692                false, "vkCreateImageView()", "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT]_BIT");
5693    }
5694
5695    if (!skipCall) {
5696        result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
5697    }
5698
5699    if (VK_SUCCESS == result) {
5700        std::lock_guard<std::mutex> lock(global_lock);
5701        dev_data->imageViewMap[*pView] = unique_ptr<VkImageViewCreateInfo>(new VkImageViewCreateInfo(*pCreateInfo));
5702        ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[*pView].get()->subresourceRange, pCreateInfo->image);
5703    }
5704
5705    return result;
5706}
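// Illustrative sketch (comment only): an image created with only a transfer usage, e.g.
//     image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
// carries none of the SAMPLED/STORAGE/ATTACHMENT bits checked above, so creating a view
// of it would be flagged. 'image_ci' is a hypothetical VkImageCreateInfo.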
5707
5708VKAPI_ATTR VkResult VKAPI_CALL
5709CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
5710    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5711    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
5712    if (VK_SUCCESS == result) {
5713        std::lock_guard<std::mutex> lock(global_lock);
5714        auto &fence_node = dev_data->fenceMap[*pFence];
5715        fence_node.fence = *pFence;
5716        fence_node.createInfo = *pCreateInfo;
5717        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
5718    }
5719    return result;
5720}
5721
5722// TODO handle pipeline caches
5723VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
5724                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
5725    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5726    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
5727    return result;
5728}
5729
5730VKAPI_ATTR void VKAPI_CALL
5731DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
5732    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5733    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
5734}
5735
5736VKAPI_ATTR VkResult VKAPI_CALL
5737GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
5738    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5739    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
5740    return result;
5741}
5742
5743VKAPI_ATTR VkResult VKAPI_CALL
5744MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
5745    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5746    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
5747    return result;
5748}
5749
5750// utility function to set collective state for pipeline
5751void set_pipeline_state(PIPELINE_NODE *pPipe) {
5752    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
5753    if (pPipe->graphicsPipelineCI.pColorBlendState) {
5754        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
5755            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
5756                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5757                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5758                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5759                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5760                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5761                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
5762                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
5763                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
5764                    pPipe->blendConstantsEnabled = true;
5765                }
5766            }
5767        }
5768    }
5769}
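// Illustrative sketch (comment only): set_pipeline_state above turns on
// blendConstantsEnabled when any enabled attachment uses a constant blend factor, e.g.
//     att.blendEnable = VK_TRUE;
//     att.srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
// which is intended to let draw-time validation require blend constants to have been set
// for such pipelines. 'att' is a hypothetical VkPipelineColorBlendAttachmentState entry.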
5770
5771VKAPI_ATTR VkResult VKAPI_CALL
5772CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5773                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5774                        VkPipeline *pPipelines) {
5775    VkResult result = VK_SUCCESS;
5776    // TODO What to do with pipelineCache?
5777    // The order of operations here is a little convoluted but gets the job done
5778    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
5779    //  2. Create state is then validated (which uses flags setup during shadowing)
5780    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
5781    bool skipCall = false;
5782    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5783    vector<PIPELINE_NODE *> pPipeNode(count);
5784    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5785
5786    uint32_t i = 0;
5787    std::unique_lock<std::mutex> lock(global_lock);
5788
5789    for (i = 0; i < count; i++) {
5790        pPipeNode[i] = new PIPELINE_NODE;
5791        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
5792        pPipeNode[i]->renderPass = getRenderPass(dev_data, pCreateInfos[i].renderPass);
5793        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5794
5795        skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
5796    }
5797
5798    if (!skipCall) {
5799        lock.unlock();
5800        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5801                                                                          pPipelines);
5802        lock.lock();
5803        for (i = 0; i < count; i++) {
5804            pPipeNode[i]->pipeline = pPipelines[i];
5805            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5806        }
5807        lock.unlock();
5808    } else {
5809        for (i = 0; i < count; i++) {
5810            delete pPipeNode[i];
5811        }
5812        lock.unlock();
5813        return VK_ERROR_VALIDATION_FAILED_EXT;
5814    }
5815    return result;
5816}
5817
5818VKAPI_ATTR VkResult VKAPI_CALL
5819CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
5820                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
5821                       VkPipeline *pPipelines) {
5822    VkResult result = VK_SUCCESS;
5823    bool skipCall = false;
5824
5825    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
5826    vector<PIPELINE_NODE *> pPipeNode(count);
5827    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5828
5829    uint32_t i = 0;
5830    std::unique_lock<std::mutex> lock(global_lock);
5831    for (i = 0; i < count; i++) {
5832        // TODO: Verify compute stage bits
5833
5834        // Create and initialize internal tracking data structure
5835        pPipeNode[i] = new PIPELINE_NODE;
5836        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
5837        pPipeNode[i]->pipelineLayout = getPipelineLayout(dev_data, pCreateInfos[i].layout);
5838        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
5839
5840        // TODO: Add Compute Pipeline Verification
5841        skipCall |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i],
5842                                               &dev_data->phys_dev_properties.features,
5843                                               dev_data->shaderModuleMap);
5844        // skipCall |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
5845    }
5846
5847    if (!skipCall) {
5848        lock.unlock();
5849        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
5850                                                                         pPipelines);
5851        lock.lock();
5852        for (i = 0; i < count; i++) {
5853            pPipeNode[i]->pipeline = pPipelines[i];
5854            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
5855        }
5856        lock.unlock();
5857    } else {
5858        for (i = 0; i < count; i++) {
5859            // Clean up any locally allocated data structures
5860            delete pPipeNode[i];
5861        }
5862        lock.unlock();
5863        return VK_ERROR_VALIDATION_FAILED_EXT;
5864    }
5865    return result;
5866}
5867
5868VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
5869                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
5870    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5871    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
5872    if (VK_SUCCESS == result) {
5873        std::lock_guard<std::mutex> lock(global_lock);
5874        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
5875    }
5876    return result;
5877}
5878
5879VKAPI_ATTR VkResult VKAPI_CALL
5880CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
5881                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
5882    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5883    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
5884    if (VK_SUCCESS == result) {
5885        // TODOSC : Capture layout bindings set
5886        std::lock_guard<std::mutex> lock(global_lock);
5887        dev_data->descriptorSetLayoutMap[*pSetLayout] =
5888            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
5889    }
5890    return result;
5891}
5892
5893// Used by CreatePipelineLayout and CmdPushConstants.
5894// Note that the index argument is optional and only used by CreatePipelineLayout.
5895static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
5896                                      const char *caller_name, uint32_t index = 0) {
5897    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
5898    bool skipCall = false;
5899    // Check that offset + size doesn't exceed the max.
5900    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
5901    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
5902        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
5903        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5904            skipCall |=
5905                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5906                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
5907                                                              "exceeds this device's maxPushConstantsSize of %u.",
5908                        caller_name, index, offset, size, maxPushConstantsSize);
5909        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5910            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5911                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
5912                                                                      "exceeds this device's maxPushConstantsSize of %u.",
5913                                caller_name, offset, size, maxPushConstantsSize);
5914        } else {
5915            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5916                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5917        }
5918    }
5919    // size needs to be non-zero and a multiple of 4.
5920    if ((size == 0) || ((size & 0x3) != 0)) {
5921        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5922            skipCall |=
5923                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5924                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5925                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5926                        caller_name, index, size);
5927        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5928            skipCall |=
5929                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5930                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5931                                                              "size %u. Size must be greater than zero and a multiple of 4.",
5932                        caller_name, size);
5933        } else {
5934            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5935                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5936        }
5937    }
5938    // offset needs to be a multiple of 4.
5939    if ((offset & 0x3) != 0) {
5940        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
5941            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5942                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
5943                                                                      "offset %u. Offset must be a multiple of 4.",
5944                                caller_name, index, offset);
5945        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
5946            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5947                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
5948                                                                      "offset %u. Offset must be a multiple of 4.",
5949                                caller_name, offset);
5950        } else {
5951            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5952                                DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
5953        }
5954    }
5955    return skipCall;
5956}
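// Worked example for the checks above (hypothetical values, maxPushConstantsSize = 128):
// offset = 4, size = 8 passes all three checks; offset = 4, size = 126 fails the range
// check because 126 > 128 - 4; offset = 2 or size = 6 fails the multiple-of-4 checks.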
5957
5958VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
5959                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
5960    bool skipCall = false;
5961    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5962    // Push Constant Range checks
5963    uint32_t i = 0;
5964    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5965        skipCall |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
5966                                              pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
5967        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
5968            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5969                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
5970        }
5971    }
5972    // Each individual range has been validated.  Now check for overlap between ranges (only done when all ranges were valid).
5973    if (!skipCall) {
5974        uint32_t i, j;
5975        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
5976            for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
5977                const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
5978                const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
5979                const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
5980                const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
5981                if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
5982                    skipCall |=
5983                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
5984                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
5985                                                                      "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
5986                                i, minA, maxA, j, minB, maxB);
5987                }
5988            }
5989        }
5990    }
5991
5992    if (skipCall)
5993        return VK_ERROR_VALIDATION_FAILED_EXT;
5994
5995    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
5996    if (VK_SUCCESS == result) {
5997        std::lock_guard<std::mutex> lock(global_lock);
5998        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
5999        plNode.descriptorSetLayouts.resize(pCreateInfo->setLayoutCount);
6000        plNode.setLayouts.resize(pCreateInfo->setLayoutCount);
6001        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6002            plNode.descriptorSetLayouts[i] = pCreateInfo->pSetLayouts[i];
6003            plNode.setLayouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
6004        }
6005        plNode.pushConstantRanges.resize(pCreateInfo->pushConstantRangeCount);
6006        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6007            plNode.pushConstantRanges[i] = pCreateInfo->pPushConstantRanges[i];
6008        }
6009    }
6010    return result;
6011}
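// Worked example (hypothetical values) for the overlap test above: ranges 0:[0, 16) and
// 1:[8, 24) satisfy (minA <= minB && maxA > minB) since 0 <= 8 and 16 > 8, so the
// overlapping-ranges warning fires; ranges [0, 16) and [16, 32) only touch at the
// boundary and pass.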
6012
6013VKAPI_ATTR VkResult VKAPI_CALL
6014CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6015                     VkDescriptorPool *pDescriptorPool) {
6016    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6017    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6018    if (VK_SUCCESS == result) {
6019        // Record this pool in the global descriptorPoolMap
6020        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6021                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
6022                    (uint64_t)*pDescriptorPool))
6023            return VK_ERROR_VALIDATION_FAILED_EXT;
6024        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
6025        if (NULL == pNewNode) {
6026            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6027                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6028                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6029                return VK_ERROR_VALIDATION_FAILED_EXT;
6030        } else {
6031            std::lock_guard<std::mutex> lock(global_lock);
6032            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6033        }
6034    } else {
6035        // TODO : Is there anything to do if pool creation fails?
6036    }
6037    return result;
6038}
6039
6040VKAPI_ATTR VkResult VKAPI_CALL
6041ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6042    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6043    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
6044    if (VK_SUCCESS == result) {
6045        std::lock_guard<std::mutex> lock(global_lock);
6046        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6047    }
6048    return result;
6049}
6050// Ensure the pool contains enough descriptors and descriptor sets to satisfy
6051// an allocation request. Fills common_data with the total number of descriptors of each type required,
6052// as well as DescriptorSetLayout ptrs used for later update.
6053static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6054                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6055    // All state checks for AllocateDescriptorSets are done in a single function
6056    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6057}
6058// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
6059static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6060                                                 VkDescriptorSet *pDescriptorSets,
6061                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6062    // All the updates are contained in a single cvdescriptorset function
6063    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6064                                                   &dev_data->setMap, dev_data);
6065}
6066
6067VKAPI_ATTR VkResult VKAPI_CALL
6068AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6069    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6070    std::unique_lock<std::mutex> lock(global_lock);
6071    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6072    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6073    lock.unlock();
6074
6075    if (skip_call)
6076        return VK_ERROR_VALIDATION_FAILED_EXT;
6077
6078    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6079
6080    if (VK_SUCCESS == result) {
6081        lock.lock();
6082        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6083        lock.unlock();
6084    }
6085    return result;
6086}
6087// Verify state before freeing DescriptorSets
6088static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6089                                              const VkDescriptorSet *descriptor_sets) {
6090    bool skip_call = false;
6091    // First make sure sets being destroyed are not currently in-use
6092    for (uint32_t i = 0; i < count; ++i)
6093        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6094
6095    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6096    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6097        // Can't Free from a NON_FREE pool
6098        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6099                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
6100                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6101                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
6102    }
6103    return skip_call;
6104}
6105// Sets have been removed from the pool so update underlying state
6106static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6107                                             const VkDescriptorSet *descriptor_sets) {
6108    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6109    // Update available descriptor sets in pool
6110    pool_state->availableSets += count;
6111
6112    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
6113    // For each freed descriptor set, add its resources back into the pool as available and remove it from the pool and setMap
6114        auto set_state = dev_data->setMap[descriptor_sets[i]];
6115        uint32_t type_index = 0, descriptor_count = 0;
6116        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6117            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6118            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6119            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6120        }
6121        freeDescriptorSet(dev_data, set_state);
6122        pool_state->sets.erase(set_state);
6123    }
6124}
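// Worked example (hypothetical values) for the bookkeeping above: freeing one set whose
// layout has two bindings -- 3 UNIFORM_BUFFER descriptors and 1 COMBINED_IMAGE_SAMPLER --
// increments availableSets by 1, availableDescriptorTypeCount[UNIFORM_BUFFER] by 3, and
// availableDescriptorTypeCount[COMBINED_IMAGE_SAMPLER] by 1.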
6125
6126VKAPI_ATTR VkResult VKAPI_CALL
6127FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6128    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6129    // Make sure that no sets being destroyed are in-flight
6130    std::unique_lock<std::mutex> lock(global_lock);
6131    bool skipCall = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6132    lock.unlock();
6133    if (skipCall)
6134        return VK_ERROR_VALIDATION_FAILED_EXT;
6135    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6136    if (VK_SUCCESS == result) {
6137        lock.lock();
6138        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6139        lock.unlock();
6140    }
6141    return result;
6142}
6143// TODO : This is a Proof-of-concept for core validation architecture
6144//  Really we'll want to break these functions out into separate files, but
6145//  we're keeping it all together here to prove out the design
6146// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6147static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6148                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6149                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6150    // The first thing to do is perform the map look-ups.
6151    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
6152    //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below
6153
6154    // Now make call(s) that validate state, but don't perform state updates in this function
6155    // Note that DescriptorSets is unique here in that we don't yet have an instance; we use a helper function in the
6156    //  namespace that parses the params and makes the calls into the specific class instances
6157    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6158                                                         descriptorCopyCount, pDescriptorCopies);
6159}
6160// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6161static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6162                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6163                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6164    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6165                                                 pDescriptorCopies);
6166}
6167
6168VKAPI_ATTR void VKAPI_CALL
6169UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6170                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6171    // Only map look-up at top level is for device-level layer_data
6172    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6173    std::unique_lock<std::mutex> lock(global_lock);
6174    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6175                                                         pDescriptorCopies);
6176    lock.unlock();
6177    if (!skip_call) {
6178        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6179                                                              pDescriptorCopies);
6180        lock.lock();
6181        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6182        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6183                                           pDescriptorCopies);
6184    }
6185}
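// Note on the locking pattern above (also used by Allocate/FreeDescriptorSets): validation
// runs under global_lock, the lock is released around the down-chain call so the driver is
// never entered while the layer mutex is held, and then state recording re-acquires it.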
6186
6187VKAPI_ATTR VkResult VKAPI_CALL
6188AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6189    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6190    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6191    if (VK_SUCCESS == result) {
6192        std::unique_lock<std::mutex> lock(global_lock);
6193        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
6194
6195        if (pPool) {
6196            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6197                // Add command buffer to its commandPool map
6198                pPool->commandBuffers.push_back(pCommandBuffer[i]);
6199                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6200                // Add command buffer to map
6201                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6202                resetCB(dev_data, pCommandBuffer[i]);
6203                pCB->createInfo = *pCreateInfo;
6204                pCB->device = device;
6205            }
6206        }
6207        printCBList(dev_data);
6208        lock.unlock();
6209    }
6210    return result;
6211}
6212
6213VKAPI_ATTR VkResult VKAPI_CALL
6214BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6215    bool skipCall = false;
6216    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6217    std::unique_lock<std::mutex> lock(global_lock);
6218    // Validate command buffer level
6219    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6220    if (pCB) {
6221        // Begin implicitly resets the Cmd Buffer, so make sure any fence on it has completed and then clear memory references
6222        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6223            skipCall |=
6224                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6225                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6226                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6227                        "You must check CB fence before this call.",
6228                        commandBuffer);
6229        }
6230        clear_cmd_buf_and_mem_references(dev_data, pCB);
6231        if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6232            // Secondary Command Buffer
6233            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6234            if (!pInfo) {
6235                skipCall |=
6236                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6237                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6238                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
6239                            reinterpret_cast<void *>(commandBuffer));
6240            } else {
6241                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
6242                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
6243                        skipCall |= log_msg(
6244                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6245                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6246                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must specify a valid renderpass parameter.",
6247                            reinterpret_cast<void *>(commandBuffer));
6248                    }
6249                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
6250                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6251                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6252                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE,
6253                                            "DS", "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) may perform better if a "
6254                                                  "valid framebuffer parameter is specified.",
6255                                            reinterpret_cast<void *>(commandBuffer));
6256                    } else {
6257                        string errorString = "";
6258                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
6259                        if (framebuffer) {
6260                            VkRenderPass fbRP = framebuffer->createInfo.renderPass;
6261                            if (!verify_renderpass_compatibility(dev_data, fbRP, pInfo->renderPass, errorString)) {
6262                                // renderPass that framebuffer was created with must be compatible with local renderPass
6263                                skipCall |=
6264                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6265                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6266                                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
6267                                            "DS", "vkBeginCommandBuffer(): Secondary Command "
6268                                                  "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
6269                                                  "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
6270                                            reinterpret_cast<void *>(commandBuffer), (uint64_t)(pInfo->renderPass),
6271                                            (uint64_t)(pInfo->framebuffer), (uint64_t)(fbRP), errorString.c_str());
6272                            }
6273                            // Connect this framebuffer to this cmdBuffer
6274                            framebuffer->cb_bindings.insert(pCB);
6275                        }
6276                    }
6277                }
6278                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
6279                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
6280                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
6281                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6282                                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
6283                                        __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6284                                        "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
6285                                        "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
6286                                        "support precise occlusion queries.",
6287                                        reinterpret_cast<void *>(commandBuffer));
6288                }
6289            }
6290            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
6291                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
6292                if (renderPass) {
6293                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
6294                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6295                                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6296                                            DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6297                                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
6298                                            "that is less than the number of subpasses (%d).",
6299                                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
6300                    }
6301                }
6302            }
6303        }
6304        if (CB_RECORDING == pCB->state) {
6305            skipCall |=
6306                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6307                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
6308                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
6309                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
6310                        (uint64_t)commandBuffer);
6311        } else if (CB_RECORDED == pCB->state || (CB_INVALID == pCB->state && CMD_END == pCB->cmds.back().type)) {
6312            VkCommandPool cmdPool = pCB->createInfo.commandPool;
6313            auto pPool = getCommandPoolNode(dev_data, cmdPool);
6314            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6315                skipCall |=
6316                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6317                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6318                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
6319                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
6320                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6321                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
6322            }
6323            resetCB(dev_data, commandBuffer);
6324        }
6325        // Set updated state here in case implicit reset occurs above
6326        pCB->state = CB_RECORDING;
6327        pCB->beginInfo = *pBeginInfo;
6328        if (pCB->beginInfo.pInheritanceInfo) {
6329            pCB->inheritanceInfo = *(pCB->beginInfo.pInheritanceInfo);
6330            pCB->beginInfo.pInheritanceInfo = &pCB->inheritanceInfo;
6331            // If we are a secondary command buffer and inheriting, update the items we should inherit.
6332            if ((pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
6333                (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6334                pCB->activeRenderPass = getRenderPass(dev_data, pCB->beginInfo.pInheritanceInfo->renderPass);
6335                pCB->activeSubpass = pCB->beginInfo.pInheritanceInfo->subpass;
6336                pCB->framebuffers.insert(pCB->beginInfo.pInheritanceInfo->framebuffer);
6337            }
6338        }
6339    } else {
6340        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6341                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
6342                            "In vkBeginCommandBuffer(): unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
6343    }
6344    lock.unlock();
6345    if (skipCall) {
6346        return VK_ERROR_VALIDATION_FAILED_EXT;
6347    }
6348    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
6349
6350    return result;
6351}
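
// Illustrative sketch (not part of the layer): the two main ways an application can trip the
// vkBeginCommandBuffer() checks above. The handles `cb` and the pool flags are hypothetical.
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     vkBeginCommandBuffer(cb, &begin_info);   // OK: first recording
//     vkBeginCommandBuffer(cb, &begin_info);   // error: CB is already in the RECORDING state
//     vkEndCommandBuffer(cb);
//     vkBeginCommandBuffer(cb, &begin_info);   // implicit reset: legal only if the pool was created
//                                              // with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT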
6352
6353VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
6354    bool skipCall = false;
6355    VkResult result = VK_SUCCESS;
6356    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6357    std::unique_lock<std::mutex> lock(global_lock);
6358    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6359    if (pCB) {
6360        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
6361            // This needs spec clarification to update valid usage, see comments in PR:
6362            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
6363            skipCall |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
6364        }
6365        skipCall |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
6366        for (auto query : pCB->activeQueries) {
6367            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6368                                DRAWSTATE_INVALID_QUERY, "DS",
6369                                "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
6370                                (uint64_t)(query.pool), query.index);
6371        }
6372    }
6373    if (!skipCall) {
6374        lock.unlock();
6375        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
6376        lock.lock();
6377        if (VK_SUCCESS == result) {
6378            pCB->state = CB_RECORDED;
6379            // Reset CB status flags
6380            pCB->status = 0;
6381            printCB(dev_data, commandBuffer);
6382        }
6383    } else {
6384        result = VK_ERROR_VALIDATION_FAILED_EXT;
6385    }
6386    lock.unlock();
6387    return result;
6388}
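
// Illustrative sketch (assumes `cb` is recording and `query_pool` is a valid VkQueryPool): ending a
// command buffer while a query is still active triggers the DRAWSTATE_INVALID_QUERY report above.
//
//     vkCmdBeginQuery(cb, query_pool, 0 /*query*/, 0 /*flags*/);
//     // ... draws ...
//     vkEndCommandBuffer(cb);               // flagged: query 0 is still in progress
//     // Correct ordering ends the query first:
//     vkCmdEndQuery(cb, query_pool, 0);
//     vkEndCommandBuffer(cb);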
6389
6390VKAPI_ATTR VkResult VKAPI_CALL
6391ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
6392    bool skip_call = false;
6393    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6394    std::unique_lock<std::mutex> lock(global_lock);
6395    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6396    VkCommandPool cmdPool = pCB->createInfo.commandPool;
6397    auto pPool = getCommandPoolNode(dev_data, cmdPool);
6398    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
6399        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6400                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
6401                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
6402                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
6403                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
6404    }
6405    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
6406    lock.unlock();
6407    if (skip_call)
6408        return VK_ERROR_VALIDATION_FAILED_EXT;
6409    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
6410    if (VK_SUCCESS == result) {
6411        lock.lock();
6412        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
6413        resetCB(dev_data, commandBuffer);
6414        lock.unlock();
6415    }
6416    return result;
6417}
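
// Illustrative sketch: explicit vkResetCommandBuffer() requires the pool to have been created with
// the per-buffer reset bit, which the check above enforces. Names are hypothetical.
//
//     VkCommandPoolCreateInfo pool_info = {};
//     pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
//     pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
//     pool_info.queueFamilyIndex = graphics_queue_family;
//     vkCreateCommandPool(device, &pool_info, nullptr, &pool);
//     // ... allocate cb from pool, record, then:
//     vkResetCommandBuffer(cb, 0);          // legal: pool allows per-buffer reset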
6418
6419VKAPI_ATTR void VKAPI_CALL
6420CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
6421    bool skipCall = false;
6422    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6423    std::unique_lock<std::mutex> lock(global_lock);
6424    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6425    if (pCB) {
6426        skipCall |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
6427        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
6428            skipCall |=
6429                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6430                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
6431                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
6432                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
6433        }
6434
6435        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
6436        if (pPN) {
6437            pCB->lastBound[pipelineBindPoint].pipeline = pipeline;
6438            set_cb_pso_status(pCB, pPN);
6439            set_pipeline_state(pPN);
6440        } else {
6441            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
6442                                (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
6443                                "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
6444        }
6445    }
6446    lock.unlock();
6447    if (!skipCall)
6448        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
6449}
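
// Illustrative sketch: the DRAWSTATE_INVALID_RENDERPASS_CMD check above fires for this pattern
// (hypothetical handles); compute pipelines must be bound outside a render pass instance.
//
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);   // flagged
//     vkCmdEndRenderPass(cb);
//     vkCmdBindPipeline(cb, VK_PIPELINE_BIND_POINT_COMPUTE, compute_pipeline);   // OK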
6450
6451VKAPI_ATTR void VKAPI_CALL
6452CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
6453    bool skipCall = false;
6454    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6455    std::unique_lock<std::mutex> lock(global_lock);
6456    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6457    if (pCB) {
6458        skipCall |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
6459        pCB->status |= CBSTATUS_VIEWPORT_SET;
6460        pCB->viewports.resize(viewportCount);
6461        memcpy(pCB->viewports.data(), pViewports, viewportCount * sizeof(VkViewport));
6462    }
6463    lock.unlock();
6464    if (!skipCall)
6465        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
6466}
6467
6468VKAPI_ATTR void VKAPI_CALL
6469CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
6470    bool skipCall = false;
6471    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6472    std::unique_lock<std::mutex> lock(global_lock);
6473    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6474    if (pCB) {
6475        skipCall |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
6476        pCB->status |= CBSTATUS_SCISSOR_SET;
6477        pCB->scissors.resize(scissorCount);
6478        memcpy(pCB->scissors.data(), pScissors, scissorCount * sizeof(VkRect2D));
6479    }
6480    lock.unlock();
6481    if (!skipCall)
6482        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
6483}
6484
6485VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
6486    bool skip_call = false;
6487    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6488    std::unique_lock<std::mutex> lock(global_lock);
6489    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6490    if (pCB) {
6491        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
6492        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
6493
6494        PIPELINE_NODE *pPipeTrav = getPipeline(dev_data, pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline);
6495        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
6496            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
6497                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
6498                                 "vkCmdSetLineWidth() called but pipeline was created without the VK_DYNAMIC_STATE_LINE_WIDTH "
6499                                 "flag. This is undefined behavior and the call may be ignored.");
6500        } else {
6501            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
6502        }
6503    }
6504    lock.unlock();
6505    if (!skip_call)
6506        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
6507}
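
// Illustrative sketch: for vkCmdSetLineWidth() to be honored, the bound graphics pipeline must list
// VK_DYNAMIC_STATE_LINE_WIDTH at creation time (hypothetical variable names).
//
//     VkDynamicState dynamic_states[] = {VK_DYNAMIC_STATE_LINE_WIDTH};
//     VkPipelineDynamicStateCreateInfo dyn_info = {};
//     dyn_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
//     dyn_info.dynamicStateCount = 1;
//     dyn_info.pDynamicStates = dynamic_states;
//     // ... chain dyn_info into VkGraphicsPipelineCreateInfo::pDynamicState ...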
6508
6509VKAPI_ATTR void VKAPI_CALL
6510CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
6511    bool skipCall = false;
6512    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6513    std::unique_lock<std::mutex> lock(global_lock);
6514    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6515    if (pCB) {
6516        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
6517        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
6518    }
6519    lock.unlock();
6520    if (!skipCall)
6521        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
6522                                                         depthBiasSlopeFactor);
6523}
6524
6525VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
6526    bool skipCall = false;
6527    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6528    std::unique_lock<std::mutex> lock(global_lock);
6529    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6530    if (pCB) {
6531        skipCall |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
6532        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
6533    }
6534    lock.unlock();
6535    if (!skipCall)
6536        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
6537}
6538
6539VKAPI_ATTR void VKAPI_CALL
6540CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
6541    bool skipCall = false;
6542    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6543    std::unique_lock<std::mutex> lock(global_lock);
6544    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6545    if (pCB) {
6546        skipCall |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
6547        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
6548    }
6549    lock.unlock();
6550    if (!skipCall)
6551        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
6552}
6553
6554VKAPI_ATTR void VKAPI_CALL
6555CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
6556    bool skipCall = false;
6557    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6558    std::unique_lock<std::mutex> lock(global_lock);
6559    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6560    if (pCB) {
6561        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
6562        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
6563    }
6564    lock.unlock();
6565    if (!skipCall)
6566        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
6567}
6568
6569VKAPI_ATTR void VKAPI_CALL
6570CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
6571    bool skipCall = false;
6572    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6573    std::unique_lock<std::mutex> lock(global_lock);
6574    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6575    if (pCB) {
6576        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
6577        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
6578    }
6579    lock.unlock();
6580    if (!skipCall)
6581        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
6582}
6583
6584VKAPI_ATTR void VKAPI_CALL
6585CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
6586    bool skipCall = false;
6587    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6588    std::unique_lock<std::mutex> lock(global_lock);
6589    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6590    if (pCB) {
6591        skipCall |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
6592        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
6593    }
6594    lock.unlock();
6595    if (!skipCall)
6596        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
6597}
6598
6599VKAPI_ATTR void VKAPI_CALL
6600CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
6601                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
6602                      const uint32_t *pDynamicOffsets) {
6603    bool skipCall = false;
6604    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6605    std::unique_lock<std::mutex> lock(global_lock);
6606    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6607    if (pCB) {
6608        if (pCB->state == CB_RECORDING) {
6609            // Track total count of dynamic descriptor types to make sure we have an offset for each one
6610            uint32_t totalDynamicDescriptors = 0;
6611            string errorString = "";
6612            uint32_t lastSetIndex = firstSet + setCount - 1;
6613            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
6614                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6615                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
6616            }
6617            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
6618            for (uint32_t i = 0; i < setCount; i++) {
6619                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
6620                if (pSet) {
6621                    pCB->lastBound[pipelineBindPoint].uniqueBoundSets.insert(pSet);
6622                    pSet->BindCommandBuffer(pCB);
6623                    pCB->lastBound[pipelineBindPoint].pipelineLayout = layout;
6624                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
6625                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6626                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6627                                        DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
6628                                        (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
6629                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
6630                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
6631                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i],
6632                                            __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
6633                                            "DS 0x%" PRIxLEAST64
6634                                            " bound but it was never updated. You may want to either update it or not bind it.",
6635                                            (uint64_t)pDescriptorSets[i]);
6636                    }
6637                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
6638                    if (!verify_set_layout_compatibility(dev_data, pSet, layout, i + firstSet, errorString)) {
6639                        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6640                                            VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6641                                            DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
6642                                            "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
6643                                            "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
6644                                            i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
6645                    }
6646
6647                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
6648
6649                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
6650
6651                    if (setDynamicDescriptorCount) {
6652                        // First make sure we won't overstep bounds of pDynamicOffsets array
6653                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
6654                            skipCall |=
6655                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6656                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6657                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6658                                        "descriptorSet #%u (0x%" PRIxLEAST64
6659                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
6660                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
6661                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
6662                                        (dynamicOffsetCount - totalDynamicDescriptors));
6663                        } else { // Validate and store dynamic offsets with the set
6664                            // Validate Dynamic Offset Minimums
6665                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
6666                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
6667                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
6668                                    if (vk_safe_modulo(
6669                                            pDynamicOffsets[cur_dyn_offset],
6670                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
6671                                        skipCall |= log_msg(
6672                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6673                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6674                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
6675                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6676                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
6677                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6678                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
6679                                    }
6680                                    cur_dyn_offset++;
6681                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
6682                                    if (vk_safe_modulo(
6683                                            pDynamicOffsets[cur_dyn_offset],
6684                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
6685                                        skipCall |= log_msg(
6686                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6687                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
6688                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
6689                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
6690                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
6691                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
6692                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
6693                                    }
6694                                    cur_dyn_offset++;
6695                                }
6696                            }
6697
6698                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
6699                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
6700                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
6701                            // Keep running total of dynamic descriptor count to verify at the end
6702                            totalDynamicDescriptors += setDynamicDescriptorCount;
6703
6704                        }
6705                    }
6706                } else {
6707                    skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6708                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
6709                                        DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
6710                                        (uint64_t)pDescriptorSets[i]);
6711                }
6712                skipCall |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
6713                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
6714                if (firstSet > 0) { // Check set #s below the first bound set
6715                    for (uint32_t i = 0; i < firstSet; ++i) {
6716                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
6717                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
6718                                                             layout, i, errorString)) {
6719                            skipCall |= log_msg(
6720                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6721                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
6722                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
6723                                "DescriptorSet 0x%" PRIxLEAST64
6724                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6725                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
6726                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
6727                        }
6728                    }
6729                }
6730                // Check if newly last bound set invalidates any remaining bound sets
6731                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
6732                    if (oldFinalBoundSet &&
6733                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, layout, lastSetIndex, errorString)) {
6734                        auto old_set = oldFinalBoundSet->GetSet();
6735                        skipCall |=
6736                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
6737                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
6738                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
6739                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
6740                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
6741                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
6742                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
6743                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
6744                                    lastSetIndex + 1, (uint64_t)layout);
6745                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
6746                    }
6747                }
6748            }
6749            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
6750            if (totalDynamicDescriptors != dynamicOffsetCount) {
6751                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
6752                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
6753                                    DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
6754                                    "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
6755                                    "is %u. It should exactly match the number of dynamic descriptors.",
6756                                    setCount, totalDynamicDescriptors, dynamicOffsetCount);
6757            }
6758        } else {
6759            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
6760        }
6761    }
6762    lock.unlock();
6763    if (!skipCall)
6764        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
6765                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
6766}
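
// Illustrative sketch of the dynamic-offset contract validated above: pDynamicOffsets must supply
// exactly one offset per dynamic descriptor in the bound sets, and each offset must be a multiple
// of the relevant min*BufferOffsetAlignment device limit. Names are hypothetical.
//
//     // One set containing a single UNIFORM_BUFFER_DYNAMIC descriptor:
//     uint32_t offsets[1] = { frame_index * aligned_ubo_stride };   // stride rounded up to
//                                                                   // minUniformBufferOffsetAlignment
//     vkCmdBindDescriptorSets(cb, VK_PIPELINE_BIND_POINT_GRAPHICS, layout,
//                             0 /*firstSet*/, 1, &set, 1 /*dynamicOffsetCount*/, offsets);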
6767
6768VKAPI_ATTR void VKAPI_CALL
6769CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
6770    bool skipCall = false;
6771    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6772    // TODO : Somewhere need to verify that IBs have correct usage state flagged
6773    std::unique_lock<std::mutex> lock(global_lock);
6774    VkDeviceMemory mem;
6775    skipCall = getBufferMemory(dev_data, buffer, &mem);
6776    auto cb_node = getCBNode(dev_data, commandBuffer);
6777    if (cb_node) {
6778        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindIndexBuffer()"); };
6779        cb_node->validate_functions.push_back(function);
6780        skipCall |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
6781        VkDeviceSize offset_align = 0;
6782        switch (indexType) {
6783        case VK_INDEX_TYPE_UINT16:
6784            offset_align = 2;
6785            break;
6786        case VK_INDEX_TYPE_UINT32:
6787            offset_align = 4;
6788            break;
6789        default:
6790            // ParamChecker should catch bad enum, we'll also throw alignment error below if offset_align stays 0
6791            break;
6792        }
6793        if (!offset_align || (offset % offset_align)) {
6794            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6795                                DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
6796                                "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
6797                                offset, string_VkIndexType(indexType));
6798        }
6799        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
6800    }
6801    lock.unlock();
6802    if (!skipCall)
6803        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
6804}
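
// Illustrative sketch: the alignment rule enforced above ties the offset to the index size, i.e. a
// multiple of 2 for VK_INDEX_TYPE_UINT16 and 4 for VK_INDEX_TYPE_UINT32 (hypothetical handles).
//
//     vkCmdBindIndexBuffer(cb, index_buffer, 6, VK_INDEX_TYPE_UINT16);   // OK: 6 % 2 == 0
//     vkCmdBindIndexBuffer(cb, index_buffer, 6, VK_INDEX_TYPE_UINT32);   // flagged: 6 % 4 != 0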
6805
6806void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
6807    uint32_t end = firstBinding + bindingCount;
6808    if (pCB->currentDrawData.buffers.size() < end) {
6809        pCB->currentDrawData.buffers.resize(end);
6810    }
6811    for (uint32_t i = 0; i < bindingCount; ++i) {
6812        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
6813    }
6814}
6815
6816static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
6817
6818VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
6819                                                uint32_t bindingCount, const VkBuffer *pBuffers,
6820                                                const VkDeviceSize *pOffsets) {
6821    bool skipCall = false;
6822    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6823    // TODO : Somewhere need to verify that VBs have correct usage state flagged
6824    std::unique_lock<std::mutex> lock(global_lock);
6825    auto cb_node = getCBNode(dev_data, commandBuffer);
6826    if (cb_node) {
6827        for (uint32_t i = 0; i < bindingCount; ++i) {
6828            VkDeviceMemory mem;
6829            skipCall |= getBufferMemory(dev_data, pBuffers[i], &mem);
6830
6831            std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, mem, "vkCmdBindVertexBuffers()"); };
6832            cb_node->validate_functions.push_back(function);
6833        }
6834        skipCall |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
6835        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
6836    } else {
6837        skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
6838    }
6839    lock.unlock();
6840    if (!skipCall)
6841        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
6842}
6843
6844/* expects global_lock to be held by caller */
6845static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
6846    bool skip_call = false;
6847
6848    for (auto imageView : pCB->updateImages) {
6849        auto iv_data = getImageViewData(dev_data, imageView);
6850        if (!iv_data)
6851            continue;
6852        VkImage image = iv_data->image;
6853        VkDeviceMemory mem;
6854        skip_call |= getImageMemory(dev_data, image, &mem);
6855        std::function<bool()> function = [=]() {
6856            set_memory_valid(dev_data, mem, true, image);
6857            return false;
6858        };
6859        pCB->validate_functions.push_back(function);
6860    }
6861    for (auto buffer : pCB->updateBuffers) {
6862        VkDeviceMemory mem;
6863        skip_call |= getBufferMemory(dev_data, buffer, &mem);
6864        std::function<bool()> function = [=]() {
6865            set_memory_valid(dev_data, mem, true);
6866            return false;
6867        };
6868        pCB->validate_functions.push_back(function);
6869    }
6870    return skip_call;
6871}
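
// The helper above uses the layer's deferred-validation pattern: work that can only be decided at
// submit time is captured in std::function closures and replayed later. A minimal sketch of the
// pattern in isolation (names are illustrative, not the layer's actual types):
//
//     std::vector<std::function<bool()>> deferred;
//     deferred.push_back([=]() { return check_resource(res); });   // record time
//     bool skip = false;
//     for (auto &fn : deferred) skip |= fn();                      // submit time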
6872
6873VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
6874                                   uint32_t firstVertex, uint32_t firstInstance) {
6875    bool skipCall = false;
6876    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6877    std::unique_lock<std::mutex> lock(global_lock);
6878    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6879    if (pCB) {
6880        skipCall |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
6881        pCB->drawCount[DRAW]++;
6882        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6883        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6884        // TODO : Need to pass commandBuffer as srcObj here
6885        skipCall |=
6886            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6887                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
6888        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6889        if (!skipCall) {
6890            updateResourceTrackingOnDraw(pCB);
6891        }
6892        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
6893    }
6894    lock.unlock();
6895    if (!skipCall)
6896        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
6897}
6898
6899VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
6900                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
6901                                          uint32_t firstInstance) {
6902    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6903    bool skipCall = false;
6904    std::unique_lock<std::mutex> lock(global_lock);
6905    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6906    if (pCB) {
6907        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
6908        pCB->drawCount[DRAW_INDEXED]++;
6909        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6910        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6911        // TODO : Need to pass commandBuffer as srcObj here
6912        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6913                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6914                            "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
6915        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6916        if (!skipCall) {
6917            updateResourceTrackingOnDraw(pCB);
6918        }
6919        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
6920    }
6921    lock.unlock();
6922    if (!skipCall)
6923        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
6924                                                        firstInstance);
6925}
6926
6927VKAPI_ATTR void VKAPI_CALL
6928CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6929    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6930    bool skipCall = false;
6931    std::unique_lock<std::mutex> lock(global_lock);
6932    VkDeviceMemory mem;
6933    // MTMTODO : merge with code below
6934    skipCall = getBufferMemory(dev_data, buffer, &mem);
6935    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndirect");
6936    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6937    if (pCB) {
6938        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
6939        pCB->drawCount[DRAW_INDIRECT]++;
6940        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS);
6941        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6942        // TODO : Need to pass commandBuffer as srcObj here
6943        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
6944                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
6945                            "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
6946        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6947        if (!skipCall) {
6948            updateResourceTrackingOnDraw(pCB);
6949        }
6950        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndirect");
6951    }
6952    lock.unlock();
6953    if (!skipCall)
6954        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
6955}
6956
6957VKAPI_ATTR void VKAPI_CALL
6958CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
6959    bool skipCall = false;
6960    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6961    std::unique_lock<std::mutex> lock(global_lock);
6962    VkDeviceMemory mem;
6963    // MTMTODO : merge with code below
6964    skipCall = getBufferMemory(dev_data, buffer, &mem);
6965    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDrawIndexedIndirect");
6966    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6967    if (pCB) {
6968        skipCall |= addCmd(dev_data, pCB, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
6969        pCB->drawCount[DRAW_INDEXED_INDIRECT]++;
6970        skipCall |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS);
6971        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6972        // TODO : Need to pass commandBuffer as srcObj here
6973        skipCall |=
6974            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
6975                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
6976                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
6977        skipCall |= synchAndPrintDSConfig(dev_data, commandBuffer);
6978        if (!skipCall) {
6979            updateResourceTrackingOnDraw(pCB);
6980        }
6981        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexedIndirect");
6982    }
6983    lock.unlock();
6984    if (!skipCall)
6985        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
6986}
6987
6988VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
6989    bool skipCall = false;
6990    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6991    std::unique_lock<std::mutex> lock(global_lock);
6992    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
6993    if (pCB) {
6994        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
6995        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
6996        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
6997        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
6998    }
6999    lock.unlock();
7000    if (!skipCall)
7001        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7002}
7003
7004VKAPI_ATTR void VKAPI_CALL
7005CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7006    bool skipCall = false;
7007    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7008    std::unique_lock<std::mutex> lock(global_lock);
7009    VkDeviceMemory mem;
7010    skipCall = getBufferMemory(dev_data, buffer, &mem);
7011    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdDispatchIndirect");
7012    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7013    if (pCB) {
7014        skipCall |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE);
7015        skipCall |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7016        skipCall |= addCmd(dev_data, pCB, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7017        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdDispatchIndirect");
7018    }
7019    lock.unlock();
7020    if (!skipCall)
7021        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7022}
7023
7024VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7025                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
7026    bool skipCall = false;
7027    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7028    std::unique_lock<std::mutex> lock(global_lock);
7029    VkDeviceMemory src_mem, dst_mem;
7030    skipCall = getBufferMemory(dev_data, srcBuffer, &src_mem);
7031    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBuffer");
7032    skipCall |= getBufferMemory(dev_data, dstBuffer, &dst_mem);
7033
7034    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBuffer");
7035    // Validate that SRC & DST buffers have correct usage flags set
7036    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
7037                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7038    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
7039                                            "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7040    auto cb_node = getCBNode(dev_data, commandBuffer);
7041    if (cb_node) {
7042        std::function<bool()> function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBuffer()"); };
7043        cb_node->validate_functions.push_back(function);
7044        function = [=]() {
7045            set_memory_valid(dev_data, dst_mem, true);
7046            return false;
7047        };
7048        cb_node->validate_functions.push_back(function);
7049
7050        skipCall |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7051        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer");
7052    }
7053    lock.unlock();
7054    if (!skipCall)
7055        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7056}
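
// Illustrative sketch: the usage-flag checks above require the buffers to have been created with
// matching transfer usage (hypothetical creation code):
//
//     VkBufferCreateInfo buf_info = {};
//     buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_info.size = size;
//     buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;   // source of vkCmdCopyBuffer
//     vkCreateBuffer(device, &buf_info, nullptr, &src_buffer);
//     // ... dst buffer likewise with VK_BUFFER_USAGE_TRANSFER_DST_BIT ...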
7057
7058static bool VerifySourceImageLayout(VkCommandBuffer cmdBuffer, VkImage srcImage, VkImageSubresourceLayers subLayers,
7059                                    VkImageLayout srcImageLayout) {
7060    bool skip_call = false;
7061
7062    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7063    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7064    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7065        uint32_t layer = i + subLayers.baseArrayLayer;
7066        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7067        IMAGE_CMD_BUF_LAYOUT_NODE node;
7068        if (!FindLayout(pCB, srcImage, sub, node)) {
7069            SetLayout(pCB, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7070            continue;
7071        }
7072        if (node.layout != srcImageLayout) {
7073            // TODO: Improve log message in the next pass
7074            skip_call |=
7075                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7076                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image when the specified source "
7077                                                                        "layout %s does not match its current layout %s.",
7078                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7079        }
7080    }
7081    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7082        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7083            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7084            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7085                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7086                                 "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7087        } else {
7088            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7089                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7090                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7091                                 string_VkImageLayout(srcImageLayout));
7092        }
7093    }
7094    return skip_call;
7095}
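
// Illustrative sketch: an application avoids the warnings/errors above by transitioning the image
// to TRANSFER_SRC_OPTIMAL with a barrier before the copy (hypothetical handles, whole-image range).
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = src_image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &barrier);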
7096
7097static bool VerifyDestImageLayout(VkCommandBuffer cmdBuffer, VkImage destImage, VkImageSubresourceLayers subLayers,
7098                                  VkImageLayout destImageLayout) {
7099    bool skip_call = false;
7100
7101    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7102    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7103    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7104        uint32_t layer = i + subLayers.baseArrayLayer;
7105        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7106        IMAGE_CMD_BUF_LAYOUT_NODE node;
7107        if (!FindLayout(pCB, destImage, sub, node)) {
7108            SetLayout(pCB, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7109            continue;
7110        }
7111        if (node.layout != destImageLayout) {
7112            skip_call |=
7113                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7114                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image when the specified destination "
7115                                                                        "layout %s does not match its current layout %s.",
7116                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7117        }
7118    }
7119    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7120        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7121            // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7122            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7123                                 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7124                                 "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7125        } else {
7126            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7127                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7128                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7129                                 string_VkImageLayout(destImageLayout));
7130        }
7131    }
7132    return skip_call;
7133}
7134
7135VKAPI_ATTR void VKAPI_CALL
7136CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7137             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7138    bool skipCall = false;
7139    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7140    std::unique_lock<std::mutex> lock(global_lock);
7141    VkDeviceMemory src_mem, dst_mem;
7142    // Validate that src & dst images have correct usage flags set
7143    skipCall = getImageMemory(dev_data, srcImage, &src_mem);
7144    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImage");
7145
7146    skipCall |= getImageMemory(dev_data, dstImage, &dst_mem);
7147    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImage");
7148    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7149                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7150    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7151                                           "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7152    auto cb_node = getCBNode(dev_data, commandBuffer);
7153    if (cb_node) {
7154        std::function<bool()> function = [=]() {
7155            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImage()", srcImage);
7156        };
7157        cb_node->validate_functions.push_back(function);
7158        function = [=]() {
7159            set_memory_valid(dev_data, dst_mem, true, dstImage);
7160            return false;
7161        };
7162        cb_node->validate_functions.push_back(function);
7163
7164        skipCall |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
7165        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage");
7166        for (uint32_t i = 0; i < regionCount; ++i) {
7167            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].srcSubresource, srcImageLayout);
7168            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].dstSubresource, dstImageLayout);
7169        }
7170    }
7171    lock.unlock();
7172    if (!skipCall)
7173        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7174                                                      regionCount, pRegions);
7175}
7176
7177VKAPI_ATTR void VKAPI_CALL
7178CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7179             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
7180    bool skipCall = false;
7181    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7182    std::unique_lock<std::mutex> lock(global_lock);
7183    VkDeviceMemory src_mem, dst_mem;
7184    // Validate that src & dst images have correct usage flags set
7185    skipCall = getImageMemory(dev_data, srcImage, &src_mem);
7186    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdBlitImage");
7187
7188    skipCall |= getImageMemory(dev_data, dstImage, &dst_mem);
7189    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdBlitImage");
7190    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
7191                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7192    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
7193                                           "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7194
7195    auto cb_node = getCBNode(dev_data, commandBuffer);
7196    if (cb_node) {
7197        std::function<bool()> function = [=]() {
7198            return validate_memory_is_valid(dev_data, src_mem, "vkCmdBlitImage()", srcImage);
7199        };
7200        cb_node->validate_functions.push_back(function);
7201        function = [=]() {
7202            set_memory_valid(dev_data, dst_mem, true, dstImage);
7203            return false;
7204        };
7205        cb_node->validate_functions.push_back(function);
7206
7207        skipCall |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
7208        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage");
7209    }
7210    lock.unlock();
7211    if (!skipCall)
7212        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7213                                                      regionCount, pRegions, filter);
7214}
7215
7216VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
7217                                                VkImage dstImage, VkImageLayout dstImageLayout,
7218                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7219    bool skipCall = false;
7220    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7221    std::unique_lock<std::mutex> lock(global_lock);
7222    VkDeviceMemory dst_mem, src_mem;
7223    skipCall = getImageMemory(dev_data, dstImage, &dst_mem);
7224    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyBufferToImage");
7225
7226    skipCall |= getBufferMemory(dev_data, srcBuffer, &src_mem);
7227    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyBufferToImage");
7228    // Validate that src buff & dst image have correct usage flags set
7229    skipCall |= validate_buffer_usage_flags(dev_data, srcBuffer, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBufferToImage()",
7230                                            "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7231    skipCall |= validate_image_usage_flags(dev_data, dstImage, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBufferToImage()",
7232                                           "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
7233    auto cb_node = getCBNode(dev_data, commandBuffer);
7234    if (cb_node) {
7235        std::function<bool()> function = [=]() {
7236            set_memory_valid(dev_data, dst_mem, true, dstImage);
7237            return false;
7238        };
7239        cb_node->validate_functions.push_back(function);
7240        function = [=]() { return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyBufferToImage()"); };
7241        cb_node->validate_functions.push_back(function);
7242
7243        skipCall |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
7244        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage");
7245        for (uint32_t i = 0; i < regionCount; ++i) {
7246            skipCall |= VerifyDestImageLayout(commandBuffer, dstImage, pRegions[i].imageSubresource, dstImageLayout);
7247        }
7248    }
7249    lock.unlock();
7250    if (!skipCall)
7251        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
7252                                                              pRegions);
7253}
7254
7255VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
7256                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
7257                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
7258    bool skipCall = false;
7259    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7260    std::unique_lock<std::mutex> lock(global_lock);
7261    VkDeviceMemory src_mem, dst_mem;
7262    skipCall = getImageMemory(dev_data, srcImage, &src_mem);
7263    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdCopyImageToBuffer");
7264
7265    skipCall |= getBufferMemory(dev_data, dstBuffer, &dst_mem);
7266    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdCopyImageToBuffer");
7267    // Validate that dst buff & src image have correct usage flags set
7268    skipCall |= validate_image_usage_flags(dev_data, srcImage, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImageToBuffer()",
7269                                           "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
7270    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImageToBuffer()",
7271                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7272
7273    auto cb_node = getCBNode(dev_data, commandBuffer);
7274    if (cb_node) {
7275        std::function<bool()> function = [=]() {
7276            return validate_memory_is_valid(dev_data, src_mem, "vkCmdCopyImageToBuffer()", srcImage);
7277        };
7278        cb_node->validate_functions.push_back(function);
7279        function = [=]() {
7280            set_memory_valid(dev_data, dst_mem, true);
7281            return false;
7282        };
7283        cb_node->validate_functions.push_back(function);
7284
7285        skipCall |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
7286        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer");
7287        for (uint32_t i = 0; i < regionCount; ++i) {
7288            skipCall |= VerifySourceImageLayout(commandBuffer, srcImage, pRegions[i].imageSubresource, srcImageLayout);
7289        }
7290    }
7291    lock.unlock();
7292    if (!skipCall)
7293        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
7294                                                              pRegions);
7295}
7296
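// vkCmdUpdateBuffer is meant for small inline updates; per the spec, dataSize must be a
// multiple of 4 and no larger than 65536 bytes (limits not enforced by this layer). A
// typical call (illustrative only; cmd/dstBuffer are placeholder handles):
//
//     const uint32_t zeros[4] = {};
//     vkCmdUpdateBuffer(cmd, dstBuffer, 0 /*dstOffset*/, sizeof(zeros), zeros);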
7297VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
7298                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
7299    bool skipCall = false;
7300    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7301    std::unique_lock<std::mutex> lock(global_lock);
7302    VkDeviceMemory mem;
7303    skipCall = getBufferMemory(dev_data, dstBuffer, &mem);
7304    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdUpdateBuffer");
7305    // Validate that dst buff has correct usage flags set
7306    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdUpdateBuffer()",
7307                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7308
7309    auto cb_node = getCBNode(dev_data, commandBuffer);
7310    if (cb_node) {
7311        std::function<bool()> function = [=]() {
7312            set_memory_valid(dev_data, mem, true);
7313            return false;
7314        };
7315        cb_node->validate_functions.push_back(function);
7316
7317        skipCall |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
7318        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer");
7319    }
7320    lock.unlock();
7321    if (!skipCall)
7322        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
7323}
7324
7325VKAPI_ATTR void VKAPI_CALL
7326CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
7327    bool skipCall = false;
7328    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7329    std::unique_lock<std::mutex> lock(global_lock);
7330    VkDeviceMemory mem;
7331    skipCall = getBufferMemory(dev_data, dstBuffer, &mem);
7332    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdFillBuffer");
7333    // Validate that dst buff has correct usage flags set
7334    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
7335                                            "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7336
7337    auto cb_node = getCBNode(dev_data, commandBuffer);
7338    if (cb_node) {
7339        std::function<bool()> function = [=]() {
7340            set_memory_valid(dev_data, mem, true);
7341            return false;
7342        };
7343        cb_node->validate_functions.push_back(function);
7344
7345        skipCall |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
7346        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer");
7347    }
7348    lock.unlock();
7349    if (!skipCall)
7350        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
7351}
7352
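// vkCmdClearAttachments clears regions of attachments bound in the current subpass, which
// is why the checks below require an active render pass and an attachment the active
// subpass actually references. A sketch of a valid call (illustrative; cmd/width/height
// are placeholders):
//
//     VkClearAttachment att = {};
//     att.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
//     att.colorAttachment = 0; // index into the subpass's pColorAttachments
//     att.clearValue.color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     VkClearRect rect = {{{0, 0}, {width, height}}, 0 /*baseArrayLayer*/, 1 /*layerCount*/};
//     vkCmdClearAttachments(cmd, 1, &att, 1, &rect);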
7353VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
7354                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
7355                                               const VkClearRect *pRects) {
7356    bool skipCall = false;
7357    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7358    std::unique_lock<std::mutex> lock(global_lock);
7359    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7360    if (pCB) {
7361        skipCall |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
7362        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
7363        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
7364            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
7365            // There are times when an app legitimately needs vkCmdClearAttachments (generally
7366            // when reusing an attachment inside a render pass), so this is reported as a
7367            // performance warning rather than an error.
7368            // TODO : Can this check be made more specific, so that uses which must call
7369            // vkCmdClearAttachments are not flagged at all?
7370            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7371                                VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
7372                                DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
7372                                "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
7373                                " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
7374                                (uint64_t)(commandBuffer));
7375        }
7376        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments");
7377    }
7378
7379    // Validate that attachment is in reference list of active subpass
7380    if (pCB && pCB->activeRenderPass) {
7381        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
7382        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
7383
7384        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
7385            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
7386            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
7387                bool found = false;
7388                for (uint32_t i = 0; i < pSD->colorAttachmentCount; i++) {
7389                    if (attachment->colorAttachment == pSD->pColorAttachments[i].attachment) {
7390                        found = true;
7391                        break;
7392                    }
7393                }
7394                if (!found) {
7395                    skipCall |= log_msg(
7396                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7397                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7398                        "vkCmdClearAttachments() attachment index %d not found in attachment reference array of active subpass %d",
7399                        attachment->colorAttachment, pCB->activeSubpass);
7400                }
7401            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
7402                if (!pSD->pDepthStencilAttachment || // No DS attachment in active subpass
7403                    (pSD->pDepthStencilAttachment->attachment ==
7404                     VK_ATTACHMENT_UNUSED)) { // DS attachment present but unused in active subpass
7405
7406                    skipCall |= log_msg(
7407                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7408                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
7409                        "vkCmdClearAttachments() attachment index %d does not match depthStencilAttachment.attachment (%d) found "
7410                        "in active subpass %d",
7411                        attachment->colorAttachment,
7412                        (pSD->pDepthStencilAttachment) ? pSD->pDepthStencilAttachment->attachment : VK_ATTACHMENT_UNUSED,
7413                        pCB->activeSubpass);
7414                }
7415            }
7416        }
7417    }
7418    lock.unlock();
7419    if (!skipCall)
7420        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
7421}
7422
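// vkCmdClearColorImage must be used outside a render pass (insideRenderPass() below flags
// the violation). VK_REMAINING_MIP_LEVELS / VK_REMAINING_ARRAY_LAYERS cover the whole
// image (illustrative; cmd/image/clearColor are placeholders):
//
//     VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS,
//                                      0, VK_REMAINING_ARRAY_LAYERS};
//     vkCmdClearColorImage(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearColor, 1, &range);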
7423VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
7424                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
7425                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
7426    bool skipCall = false;
7427    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7428    std::unique_lock<std::mutex> lock(global_lock);
7429    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7430    VkDeviceMemory mem;
7431    skipCall = getImageMemory(dev_data, image, &mem);
7432    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearColorImage");
7433    auto cb_node = getCBNode(dev_data, commandBuffer);
7434    if (cb_node) {
7435        std::function<bool()> function = [=]() {
7436            set_memory_valid(dev_data, mem, true, image);
7437            return false;
7438        };
7439        cb_node->validate_functions.push_back(function);
7440
7441        skipCall |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
7442        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage");
7443    }
7444    lock.unlock();
7445    if (!skipCall)
7446        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
7447}
7448
7449VKAPI_ATTR void VKAPI_CALL
7450CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
7451                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
7452                          const VkImageSubresourceRange *pRanges) {
7453    bool skipCall = false;
7454    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7455    std::unique_lock<std::mutex> lock(global_lock);
7456    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
7457    VkDeviceMemory mem;
7458    skipCall = getImageMemory(dev_data, image, &mem);
7459    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdClearDepthStencilImage");
7460    auto cb_node = getCBNode(dev_data, commandBuffer);
7461    if (cb_node) {
7462        std::function<bool()> function = [=]() {
7463            set_memory_valid(dev_data, mem, true, image);
7464            return false;
7465        };
7466        cb_node->validate_functions.push_back(function);
7467
7468        skipCall |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
7469        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage");
7470    }
7471    lock.unlock();
7472    if (!skipCall)
7473        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
7474                                                                   pRanges);
7475}
7476
7477VKAPI_ATTR void VKAPI_CALL
7478CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7479                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
7480    bool skipCall = false;
7481    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7482    std::unique_lock<std::mutex> lock(global_lock);
7483    VkDeviceMemory src_mem, dst_mem;
7484    skipCall = getImageMemory(dev_data, srcImage, &src_mem);
7485    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, src_mem, "vkCmdResolveImage");
7486
7487    skipCall |= getImageMemory(dev_data, dstImage, &dst_mem);
7488    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, dst_mem, "vkCmdResolveImage");
7489    auto cb_node = getCBNode(dev_data, commandBuffer);
7490    if (cb_node) {
7491        std::function<bool()> function = [=]() {
7492            return validate_memory_is_valid(dev_data, src_mem, "vkCmdResolveImage()", srcImage);
7493        };
7494        cb_node->validate_functions.push_back(function);
7495        function = [=]() {
7496            set_memory_valid(dev_data, dst_mem, true, dstImage);
7497            return false;
7498        };
7499        cb_node->validate_functions.push_back(function);
7500
7501        skipCall |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
7502        skipCall |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage");
7503    }
7504    lock.unlock();
7505    if (!skipCall)
7506        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
7507                                                         regionCount, pRegions);
7508}
7509
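// Event state can only be fully resolved at submit time (the same command buffer may be
// submitted to different queues), so setEventStageMask is not invoked directly by the
// Cmd* entry points below; it is bound into pCB->eventUpdates and evaluated with the
// real VkQueue during queue submission.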
7510bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7511    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7512    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7513    if (pCB) {
7514        pCB->eventToStageMap[event] = stageMask;
7515    }
7516    auto queue_data = dev_data->queueMap.find(queue);
7517    if (queue_data != dev_data->queueMap.end()) {
7518        queue_data->second.eventToStageMap[event] = stageMask;
7519    }
7520    return false;
7521}
7522
7523VKAPI_ATTR void VKAPI_CALL
7524CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7525    bool skipCall = false;
7526    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7527    std::unique_lock<std::mutex> lock(global_lock);
7528    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7529    if (pCB) {
7530        skipCall |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
7531        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent");
7532        pCB->events.push_back(event);
7533        if (!pCB->waitedEvents.count(event)) {
7534            pCB->writeEventsBeforeWait.push_back(event);
7535        }
7536        std::function<bool(VkQueue)> eventUpdate =
7537            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
7538        pCB->eventUpdates.push_back(eventUpdate);
7539    }
7540    lock.unlock();
7541    if (!skipCall)
7542        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
7543}
7544
7545VKAPI_ATTR void VKAPI_CALL
7546CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
7547    bool skipCall = false;
7548    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7549    std::unique_lock<std::mutex> lock(global_lock);
7550    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7551    if (pCB) {
7552        skipCall |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
7553        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent");
7554        pCB->events.push_back(event);
7555        if (!pCB->waitedEvents.count(event)) {
7556            pCB->writeEventsBeforeWait.push_back(event);
7557        }
7558        std::function<bool(VkQueue)> eventUpdate =
7559            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
7560        pCB->eventUpdates.push_back(eventUpdate);
7561    }
7562    lock.unlock();
7563    if (!skipCall)
7564        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
7565}
7566
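// Walks every (mip level, array layer) subresource named by each barrier and records the
// old->new layout in the CB's layout map. First sight of a subresource seeds the map;
// afterwards, a mismatch between the barrier's oldLayout and the tracked layout is an
// error, except for VK_IMAGE_LAYOUT_UNDEFINED, which acts as a wildcard.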
7567static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7568                                   const VkImageMemoryBarrier *pImgMemBarriers) {
7569    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7570    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7571    bool skip = false;
7572    uint32_t levelCount = 0;
7573    uint32_t layerCount = 0;
7574
7575    for (uint32_t i = 0; i < memBarrierCount; ++i) {
7576        auto mem_barrier = &pImgMemBarriers[i];
7577        if (!mem_barrier)
7578            continue;
7579        // TODO: Do not iterate over every possibility - consolidate where
7580        // possible
7581        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
7582
7583        for (uint32_t j = 0; j < levelCount; j++) {
7584            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
7585            for (uint32_t k = 0; k < layerCount; k++) {
7586                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
7587                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
7588                IMAGE_CMD_BUF_LAYOUT_NODE node;
7589                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
7590                    SetLayout(pCB, mem_barrier->image, sub,
7591                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
7592                    continue;
7593                }
7594                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
7595                    // TODO: Set memory invalid which is in mem_tracker currently
7596                } else if (node.layout != mem_barrier->oldLayout) {
7597                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7598                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
7599                                                                                    "when current layout is %s.",
7600                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
7601                }
7602                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
7603            }
7604        }
7605    }
7606    return skip;
7607}
7608
7609// Print readable FlagBits in FlagMask
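// e.g. string_VkAccessFlags(VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT)
//      yields "[VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT]"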
7610static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
7611    std::string result;
7612    std::string separator;
7613
7614    if (accessMask == 0) {
7615        result = "[None]";
7616    } else {
7617        result = "[";
7618        for (uint32_t i = 0; i < 32; i++) {
7619            if (accessMask & (1u << i)) {
7620                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
7621                separator = " | ";
7622            }
7623        }
7624        result = result + "]";
7625    }
7626    return result;
7627}
7628
7629// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
7630// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
7631// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
7632static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7633                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
7634                             const char *type) {
7635    bool skip_call = false;
7636
7637    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
7638        if (accessMask & ~(required_bit | optional_bits)) {
7639            // TODO: Verify against Valid Use
7640            skip_call |=
7641                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7642                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7643                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7644        }
7645    } else {
7646        if (!required_bit) {
7647            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7648                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
7649                                                                  "%s when layout is %s, unless the app has previously added a "
7650                                                                  "barrier for this transition.",
7651                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
7652                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
7653        } else {
7654            std::string opt_bits;
7655            if (optional_bits != 0) {
7656                std::stringstream ss;
7657                ss << optional_bits;
7658                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
7659            }
7660            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7661                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
7662                                                                  "layout is %s, unless the app has previously added a barrier for "
7663                                                                  "this transition.",
7664                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
7665                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
7666        }
7667    }
7668    return skip_call;
7669}
7670
7671static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
7672                                        const VkImageLayout &layout, const char *type) {
7673    bool skip_call = false;
7674    switch (layout) {
7675    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
7676        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
7677                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
7678        break;
7679    }
7680    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
7681        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
7682                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
7683        break;
7684    }
7685    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
7686        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
7687        break;
7688    }
7689    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
7690        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
7691        break;
7692    }
7693    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
7694        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7695                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7696        break;
7697    }
7698    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
7699        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
7700                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
7701        break;
7702    }
7703    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
7704        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
7705        break;
7706    }
7707    case VK_IMAGE_LAYOUT_UNDEFINED: {
7708        if (accessMask != 0) {
7709            // TODO: Verify against Valid Use section spec
7710            skip_call |=
7711                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7712                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
7713                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
7714        }
7715        break;
7716    }
7717    case VK_IMAGE_LAYOUT_GENERAL:
7718    default: { break; }
7719    }
7720    return skip_call;
7721}
7722
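// For reference, an image barrier that satisfies the checks below, transitioning an
// EXCLUSIVE-sharing image UNDEFINED -> TRANSFER_DST_OPTIMAL (a hedged sketch; 'image' is
// a placeholder handle):
//
//     VkImageMemoryBarrier b = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     b.srcAccessMask = 0;                             // UNDEFINED expects no access bits
//     b.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;  // required for TRANSFER_DST_OPTIMAL
//     b.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
//     b.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     b.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // both IGNORED: no ownership transfer
//     b.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     b.image = image;
//     b.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};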
7723static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
7724                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
7725                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
7726                             const VkImageMemoryBarrier *pImageMemBarriers) {
7727    bool skip_call = false;
7728    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
7729    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
7730    if (pCB->activeRenderPass && memBarrierCount) {
7731        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
7732            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7733                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
7734                                                                  "with no self dependency specified.",
7735                                 funcName, pCB->activeSubpass);
7736        }
7737    }
7738    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
7739        auto mem_barrier = &pImageMemBarriers[i];
7740        auto image_data = getImageNode(dev_data, mem_barrier->image);
7741        if (image_data) {
7742            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
7743            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
7744            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
7745                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
7746                // be VK_QUEUE_FAMILY_IGNORED
7747                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
7748                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7749                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7750                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
7751                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
7752                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
7753                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7754                }
7755            } else {
7756                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
7757                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
7758                // or both be a valid queue family
7759                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
7760                    (src_q_f_index != dst_q_f_index)) {
7761                    skip_call |=
7762                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7763                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
7764                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
7765                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
7766                                                                     "must be.",
7767                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
7768                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
7769                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7770                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
7771                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7772                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7773                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
7774                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
7775                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
7776                                         "queueFamilies crated for this device.",
7777                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
7778                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
7779                }
7780            }
7781        }
7782
7783        if (mem_barrier) {
7784            skip_call |=
7785                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
7786            skip_call |=
7787                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
7788            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
7789                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7790                        __LINE__,
7790                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
7791                                                         "PREINITIALIZED.",
7792                        funcName);
7793            }
7794            auto image_data = getImageNode(dev_data, mem_barrier->image);
7795            VkFormat format = VK_FORMAT_UNDEFINED;
7796            uint32_t arrayLayers = 0, mipLevels = 0;
7797            bool imageFound = false;
7798            if (image_data) {
7799                format = image_data->createInfo.format;
7800                arrayLayers = image_data->createInfo.arrayLayers;
7801                mipLevels = image_data->createInfo.mipLevels;
7802                imageFound = true;
7803            } else if (dev_data->device_extensions.wsi_enabled) {
7804                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
7805                if (imageswap_data) {
7806                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
7807                    if (swapchain_data) {
7808                        format = swapchain_data->createInfo.imageFormat;
7809                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
7810                        mipLevels = 1;
7811                        imageFound = true;
7812                    }
7813                }
7814            }
7815            if (imageFound) {
7816                if (vk_format_is_depth_and_stencil(format) &&
7817                    (!(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
7818                     !(mem_barrier->subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT))) {
7819                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7820                            __LINE__,
7820                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth and stencil format and thus must "
7821                                                             "have both VK_IMAGE_ASPECT_DEPTH_BIT and "
7822                                                             "VK_IMAGE_ASPECT_STENCIL_BIT set.",
7823                            funcName);
7824                }
7825                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
7826                                     ? 1
7827                                     : mem_barrier->subresourceRange.layerCount;
7828                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
7829                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7830                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource range's sum of baseArrayLayer "
7831                                                             "(%d) and layerCount (%d) must be less than or equal "
7832                                                             "to the total number of layers (%d).",
7833                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
7834                            arrayLayers);
7835                }
7836                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
7837                                     ? 1
7838                                     : mem_barrier->subresourceRange.levelCount;
7839                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
7840                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
7841                            __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource range's sum of baseMipLevel "
7842                                                             "(%d) and levelCount (%d) must be less than or equal "
7843                                                             "to the total number of levels (%d).",
7844                            funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
7845                            mipLevels);
7846                }
7847            }
7848        }
7849    }
7850    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
7851        auto mem_barrier = &pBufferMemBarriers[i];
7852        if (pCB->activeRenderPass) {
7853            skip_call |=
7854                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7855                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
7856        }
7857        if (!mem_barrier)
7858            continue;
7859
7860        // Validate buffer barrier queue family indices
7861        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7862             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
7863            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
7864             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
7865            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7866                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
7867                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
7868                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
7869                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7870                                 dev_data->phys_dev_properties.queue_family_properties.size());
7871        }
7872
7873        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
7874        if (buffer_node) {
7875            VkDeviceSize buffer_size =
7876                (buffer_node->createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO) ? buffer_node->createInfo.size : 0;
7877            if (mem_barrier->offset >= buffer_size) {
7878                skip_call |= log_msg(
7879                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7880                    DRAWSTATE_INVALID_BARRIER, "DS",
7881                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
7882                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7883                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
7884            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
7885                skip_call |= log_msg(
7886                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7887                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
7888                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
7889                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
7890                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
7891                    reinterpret_cast<const uint64_t &>(buffer_size));
7892            }
7893        }
7894    }
7895    return skip_call;
7896}
7897
7898bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
7899    bool skip_call = false;
7900    VkPipelineStageFlags stageMask = 0;
7901    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
7902    for (uint32_t i = 0; i < eventCount; ++i) {
7903        auto event = pCB->events[firstEventIndex + i];
7904        auto queue_data = dev_data->queueMap.find(queue);
7905        if (queue_data == dev_data->queueMap.end())
7906            return false;
7907        auto event_data = queue_data->second.eventToStageMap.find(event);
7908        if (event_data != queue_data->second.eventToStageMap.end()) {
7909            stageMask |= event_data->second;
7910        } else {
7911            auto global_event_data = dev_data->eventMap.find(event);
7912            if (global_event_data == dev_data->eventMap.end()) {
7913                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
7914                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
7915                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
7916                                     reinterpret_cast<const uint64_t &>(event));
7917            } else {
7918                stageMask |= global_event_data->second.stageMask;
7919            }
7920        }
7921    }
7922    // TODO: Need to validate that HOST_BIT is only set if vkSetEvent was called,
7923    // but vkSetEvent can be called at any time.
7924    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
7925        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7926                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to VkCmdWaitEvents "
7927                                                            "using srcStageMask 0x%X which must be the bitwise "
7928                                                            "OR of the stageMask parameters used in calls to "
7929                                                            "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
7930                                                            "used with vkSetEvent but instead is 0x%X.",
7931                             sourceStageMask, stageMask);
7932    }
7933    return skip_call;
7934}
7935
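// validateEventStageMask (above) requires the srcStageMask passed here to equal the OR
// of the stageMasks the events were set with (optionally plus HOST_BIT). A matching
// set/wait pair (illustrative; cmd/event are placeholders):
//
//     vkCmdSetEvent(cmd, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
//     vkCmdWaitEvents(cmd, 1, &event, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, NULL, 0, NULL, 0, NULL);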
7936VKAPI_ATTR void VKAPI_CALL
7937CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
7938              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7939              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7940              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7941    bool skipCall = false;
7942    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7943    std::unique_lock<std::mutex> lock(global_lock);
7944    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7945    if (pCB) {
7946        auto firstEventIndex = pCB->events.size();
7947        for (uint32_t i = 0; i < eventCount; ++i) {
7948            pCB->waitedEvents.insert(pEvents[i]);
7949            pCB->events.push_back(pEvents[i]);
7950        }
7951        std::function<bool(VkQueue)> eventUpdate =
7952            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
7953        pCB->eventUpdates.push_back(eventUpdate);
7954        if (pCB->state == CB_RECORDING) {
7955            skipCall |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
7956        } else {
7957            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
7958        }
7959        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7960        skipCall |=
7961            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7962                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7963    }
7964    lock.unlock();
7965    if (!skipCall)
7966        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
7967                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7968                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7969}
7970
7971VKAPI_ATTR void VKAPI_CALL
7972CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
7973                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
7974                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
7975                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
7976    bool skipCall = false;
7977    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7978    std::unique_lock<std::mutex> lock(global_lock);
7979    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7980    if (pCB) {
7981        skipCall |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
7982        skipCall |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
7983        skipCall |=
7984            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7985                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7986    }
7987    lock.unlock();
7988    if (!skipCall)
7989        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
7990                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
7991                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
7992}
7993
7994bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
7995    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7996    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7997    if (pCB) {
7998        pCB->queryToStateMap[object] = value;
7999    }
8000    auto queue_data = dev_data->queueMap.find(queue);
8001    if (queue_data != dev_data->queueMap.end()) {
8002        queue_data->second.queryToStateMap[object] = value;
8003    }
8004    return false;
8005}
8006
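// Query lifecycle as tracked by this layer: vkCmdBeginQuery marks the query active,
// vkCmdEndQuery must observe it active and defers a 'results available' state update,
// and vkCmdCopyQueryPoolResults later verifies availability via that deferred state.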
8007VKAPI_ATTR void VKAPI_CALL
8008CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8009    bool skipCall = false;
8010    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8011    std::unique_lock<std::mutex> lock(global_lock);
8012    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8013    if (pCB) {
8014        QueryObject query = {queryPool, slot};
8015        pCB->activeQueries.insert(query);
8016        if (!pCB->startedQueries.count(query)) {
8017            pCB->startedQueries.insert(query);
8018        }
8019        skipCall |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8020    }
8021    lock.unlock();
8022    if (!skipCall)
8023        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8024}
8025
8026VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8027    bool skipCall = false;
8028    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8029    std::unique_lock<std::mutex> lock(global_lock);
8030    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8031    if (pCB) {
8032        QueryObject query = {queryPool, slot};
8033        if (!pCB->activeQueries.count(query)) {
8034            skipCall |=
8035                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8036                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
8037                        (uint64_t)(queryPool), slot);
8038        } else {
8039            pCB->activeQueries.erase(query);
8040        }
8041        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8042        pCB->queryUpdates.push_back(queryUpdate);
8043        if (pCB->state == CB_RECORDING) {
8044            skipCall |= addCmd(dev_data, pCB, CMD_ENDQUERY, "VkCmdEndQuery()");
8045        } else {
8046            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8047        }
8048    }
8049    lock.unlock();
8050    if (!skipCall)
8051        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8052}
8053
8054VKAPI_ATTR void VKAPI_CALL
8055CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8056    bool skipCall = false;
8057    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8058    std::unique_lock<std::mutex> lock(global_lock);
8059    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8060    if (pCB) {
8061        for (uint32_t i = 0; i < queryCount; i++) {
8062            QueryObject query = {queryPool, firstQuery + i};
8063            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
8064            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
8065            pCB->queryUpdates.push_back(queryUpdate);
8066        }
8067        if (pCB->state == CB_RECORDING) {
8068            skipCall |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()");
8069        } else {
8070            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
8071        }
8072        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool");
8073    }
8074    lock.unlock();
8075    if (!skipCall)
8076        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
8077}
8078
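// Like event state, query availability depends on which queue the command buffer runs
// on, so this check is deferred: CmdCopyQueryPoolResults binds it into pCB->queryUpdates
// and it is evaluated with the actual VkQueue at submit time.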
8079bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
8080    bool skip_call = false;
8081    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
8082    auto queue_data = dev_data->queueMap.find(queue);
8083    if (queue_data == dev_data->queueMap.end())
8084        return false;
8085    for (uint32_t i = 0; i < queryCount; i++) {
8086        QueryObject query = {queryPool, firstQuery + i};
8087        auto query_data = queue_data->second.queryToStateMap.find(query);
8088        bool fail = false;
8089        if (query_data != queue_data->second.queryToStateMap.end()) {
8090            if (!query_data->second) {
8091                fail = true;
8092            }
8093        } else {
8094            auto global_query_data = dev_data->queryToStateMap.find(query);
8095            if (global_query_data != dev_data->queryToStateMap.end()) {
8096                if (!global_query_data->second) {
8097                    fail = true;
8098                }
8099            } else {
8100                fail = true;
8101            }
8102        }
8103        if (fail) {
8104            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8105                                 DRAWSTATE_INVALID_QUERY, "DS",
8106                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
8107                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
8108        }
8109    }
8110    return skip_call;
8111}
8112
8113VKAPI_ATTR void VKAPI_CALL
8114CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
8115                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
8116    bool skipCall = false;
8117    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8118    std::unique_lock<std::mutex> lock(global_lock);
8119    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8120#if MTMERGESOURCE
    VkDeviceMemory mem;
    skipCall |= getBufferMemory(dev_data, dstBuffer, &mem);
    if (pCB) {
        std::function<bool()> function = [=]() {
            set_memory_valid(dev_data, mem, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
8131    skipCall |= update_cmd_buf_and_mem_references(dev_data, commandBuffer, mem, "vkCmdCopyQueryPoolResults");
8132    // Validate that DST buffer has correct usage flags set
8133    skipCall |= validate_buffer_usage_flags(dev_data, dstBuffer, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8134                                            "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8135#endif
8136    if (pCB) {
8137        std::function<bool(VkQueue)> queryUpdate =
8138            std::bind(validateQuery, std::placeholders::_1, pCB, queryPool, queryCount, firstQuery);
8139        pCB->queryUpdates.push_back(queryUpdate);
8140        if (pCB->state == CB_RECORDING) {
8141            skipCall |= addCmd(dev_data, pCB, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
8142        } else {
8143            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
8144        }
8145        skipCall |= insideRenderPass(dev_data, pCB, "vkCmdCopyQueryPoolResults");
8146    }
8147    lock.unlock();
8148    if (!skipCall)
8149        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
8150                                                                 dstOffset, stride, flags);
8151}
8152
8153VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
8154                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
8155                                            const void *pValues) {
8156    bool skipCall = false;
8157    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8158    std::unique_lock<std::mutex> lock(global_lock);
8159    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8160    if (pCB) {
8161        if (pCB->state == CB_RECORDING) {
8162            skipCall |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
8163        } else {
8164            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
8165        }
8166    }
8167    skipCall |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
8168    if (0 == stageFlags) {
8169        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8170                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
8171    }
8172
8173    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
8174    auto pipeline_layout = getPipelineLayout(dev_data, layout);
8175    if (!pipeline_layout) {
8176        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8177                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Pipeline Layout 0x%" PRIx64 " not found.",
8178                            (uint64_t)layout);
8179    } else {
8180        // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
8181        // contained in the pipeline ranges.
8182        // Build a {start, end} span list for ranges with matching stage flags.
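        // Example (illustrative values only): matching ranges {offset 0, size 16} and
        // {offset 8, size 24} coalesce into the single span [0, 32), so an update at
        // offset 4 with size 20 is accepted even though it crosses both raw ranges.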
8183        const auto &ranges = pipeline_layout->pushConstantRanges;
8184        struct span {
8185            uint32_t start;
8186            uint32_t end;
8187        };
8188        std::vector<span> spans;
8189        spans.reserve(ranges.size());
8190        for (const auto &iter : ranges) {
8191            if (iter.stageFlags == stageFlags) {
8192                spans.push_back({iter.offset, iter.offset + iter.size});
8193            }
8194        }
8195        if (spans.size() == 0) {
8196            // There were no ranges that matched the stageFlags.
8197            skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8198                                DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8199                                "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
8200                                "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
8201                                (uint32_t)stageFlags, (uint64_t)layout);
8202        } else {
8203            // Sort span list by start value.
8204            struct comparer {
8205                bool operator()(struct span i, struct span j) { return i.start < j.start; }
8206            } my_comparer;
8207            std::sort(spans.begin(), spans.end(), my_comparer);
8208
8209            // Examine two spans at a time.
8210            std::vector<span>::iterator current = spans.begin();
8211            std::vector<span>::iterator next = current + 1;
8212            while (next != spans.end()) {
8213                if (current->end < next->start) {
8214                    // There is a gap; cannot coalesce. Move to the next two spans.
8215                    ++current;
8216                    ++next;
8217                } else {
8218                    // Coalesce the two spans.  The start of the next span
8219                    // is within the current span, so pick the larger of
8220                    // the end values to extend the current span.
8221                    // Then delete the next span and set next to the span after it.
8222                    current->end = max(current->end, next->end);
8223                    next = spans.erase(next);
8224                }
8225            }
8226
8227            // Now we can check if the incoming range is within any of the spans.
8228            bool contained_in_a_range = false;
8229            for (uint32_t i = 0; i < spans.size(); ++i) {
8230                if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
8231                    contained_in_a_range = true;
8232                    break;
8233                }
8234            }
8235            if (!contained_in_a_range) {
8236                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8237                                    __LINE__, DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS",
8238                                    "vkCmdPushConstants() Push constant range [%d, %d) "
8239                                    "with stageFlags = 0x%" PRIx32 " "
8240                                    "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
8241                                    offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
8242            }
8243        }
8244    }
8245    lock.unlock();
8246    if (!skipCall)
8247        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
8248}
8249
8250VKAPI_ATTR void VKAPI_CALL
8251CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
8252    bool skipCall = false;
8253    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8254    std::unique_lock<std::mutex> lock(global_lock);
8255    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8256    if (pCB) {
8257        QueryObject query = {queryPool, slot};
8258        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8259        pCB->queryUpdates.push_back(queryUpdate);
8260        if (pCB->state == CB_RECORDING) {
8261            skipCall |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
8262        } else {
8263            skipCall |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
8264        }
8265    }
8266    lock.unlock();
8267    if (!skipCall)
8268        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
8269}
8270
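// Helper for framebuffer creation: for each attachment reference used by a subpass,
// verify that the image behind the referenced view was created with the required
// usage bit (e.g. a color attachment's image needs VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT).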
8271static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
8272                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
8273    bool skip_call = false;
8274
8275    for (uint32_t attach = 0; attach < count; attach++) {
8276        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
8277            // Attachment counts are verified elsewhere, but prevent an invalid access
8278            if (attachments[attach].attachment < fbci->attachmentCount) {
8279                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
8280                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, *image_view);
8281                if (ivci != nullptr) {
                    // Check the image node itself for null before taking the address of its createInfo
                    auto image_node = getImageNode(dev_data, ivci->image);
                    if (image_node != nullptr) {
                        const VkImageCreateInfo *ici = &image_node->createInfo;
                        if ((ici->usage & usage_flag) == 0) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s).",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
                        }
                    }
8292                }
8293            }
8294        }
8295    }
8296    return skip_call;
8297}
8298
8299// Validate VkFramebufferCreateInfo which includes:
8300// 1. attachmentCount equals renderPass attachmentCount
8301// 2. corresponding framebuffer and renderpass attachments have matching formats
8302// 3. corresponding framebuffer and renderpass attachments have matching sample counts
8303// 4. fb attachments only have a single mip level
8304// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
8306// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
8307// 8. fb dimensions are within physical device limits
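// A minimal VkFramebufferCreateInfo sketch that satisfies the checks below
// (handles are illustrative assumptions):
//     VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO};
//     fbci.renderPass = render_pass;    // attachmentCount must match this pass
//     fbci.attachmentCount = 1;
//     fbci.pAttachments = &color_view;  // single-mip, identity-swizzle view, >= fb dims
//     fbci.width = 1920;
//     fbci.height = 1080;
//     fbci.layers = 1;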
8308static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8309    bool skip_call = false;
8310
8311    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
8312    if (rp_node) {
8313        const VkRenderPassCreateInfo *rpci = rp_node->pCreateInfo;
8314        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
8315            skip_call |= log_msg(
8316                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8317                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
8318                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
8319                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
8320                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8321        } else {
8322            // attachmentCounts match, so make sure corresponding attachment details line up
8323            const VkImageView *image_views = pCreateInfo->pAttachments;
8324            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                VkImageViewCreateInfo *ivci = getImageViewData(dev_data, image_views[i]);
                if (ivci == nullptr) {
                    continue; // unknown image view; cannot check format/sample/mip/swizzle state
                }
8326                if (ivci->format != rpci->pAttachments[i].format) {
8327                    skip_call |= log_msg(
8328                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8329                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8330                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
8331                              "the format of "
8332                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8333                        i, string_VkFormat(ivci->format), string_VkFormat(rpci->pAttachments[i].format),
8334                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8335                }
                auto image_node = getImageNode(dev_data, ivci->image);
                if (image_node == nullptr) {
                    continue; // unknown image; cannot check sample count or mip dimensions
                }
                const VkImageCreateInfo *ici = &image_node->createInfo;
8337                if (ici->samples != rpci->pAttachments[i].samples) {
8338                    skip_call |= log_msg(
8339                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8340                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
8341                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
8342                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
8343                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
8344                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8345                }
8346                // Verify that view only has a single mip level
8347                if (ivci->subresourceRange.levelCount != 1) {
8348                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
8349                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8350                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
8351                                         "but only a single mip level (levelCount ==  1) is allowed when creating a Framebuffer.",
8352                                         i, ivci->subresourceRange.levelCount);
8353                }
8354                const uint32_t mip_level = ivci->subresourceRange.baseMipLevel;
8355                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
8356                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
8357                if ((ivci->subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
8358                    (mip_height < pCreateInfo->height)) {
8359                    skip_call |=
8360                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8361                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8362                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
8363                                "than the corresponding "
8364                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
8365                                "dimensions for "
8366                                "attachment #%u, framebuffer:\n"
8367                                "width: %u, %u\n"
8368                                "height: %u, %u\n"
8369                                "layerCount: %u, %u\n",
8370                                i, ivci->subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
8371                                pCreateInfo->height, ivci->subresourceRange.layerCount, pCreateInfo->layers);
8372                }
8373                if (((ivci->components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.r != VK_COMPONENT_SWIZZLE_R)) ||
8374                    ((ivci->components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.g != VK_COMPONENT_SWIZZLE_G)) ||
8375                    ((ivci->components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.b != VK_COMPONENT_SWIZZLE_B)) ||
8376                    ((ivci->components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci->components.a != VK_COMPONENT_SWIZZLE_A))) {
8377                    skip_call |= log_msg(
8378                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8379                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8380                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identy swizzle. All framebuffer "
8381                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
8382                        "r swizzle = %s\n"
8383                        "g swizzle = %s\n"
8384                        "b swizzle = %s\n"
8385                        "a swizzle = %s\n",
8386                        i, string_VkComponentSwizzle(ivci->components.r), string_VkComponentSwizzle(ivci->components.g),
8387                        string_VkComponentSwizzle(ivci->components.b), string_VkComponentSwizzle(ivci->components.a));
8388                }
8389            }
8390        }
8391        // Verify correct attachment usage flags
8392        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
8393            // Verify input attachments:
8394            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
8395                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
8396            // Verify color attachments:
8397            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
8398                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
8399            // Verify depth/stencil attachments:
8400            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
8401                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
8402                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
8403            }
8404        }
8405    } else {
8406        skip_call |=
8407            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
8408                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8409                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
8410                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
8411    }
8412    // Verify FB dimensions are within physical device limits
8413    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
8414        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
8415        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
8416        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
8417                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
8418                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
8419                             "Here are the respective dimensions: requested, device max:\n"
8420                             "width: %u, %u\n"
8421                             "height: %u, %u\n"
8422                             "layerCount: %u, %u\n",
8423                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
8424                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
8425                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
8426    }
8427    return skip_call;
8428}
8429
8430// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
//  Returns true if an error was encountered and the callback wants to skip the call down the chain;
//  false indicates that the call down the chain should proceed.
8433static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
8434    // TODO : Verify that renderPass FB is created with is compatible with FB
8435    bool skip_call = false;
8436    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
8437    return skip_call;
8438}
8439
8440// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
8441static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
8442    // Shadow create info and store in map
8443    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
8444        new FRAMEBUFFER_NODE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->pCreateInfo));
8445
8446    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8447        VkImageView view = pCreateInfo->pAttachments[i];
8448        auto view_data = getImageViewData(dev_data, view);
8449        if (!view_data) {
8450            continue;
8451        }
8452        MT_FB_ATTACHMENT_INFO fb_info;
8453        getImageMemory(dev_data, view_data->image, &fb_info.mem);
8454        fb_info.image = view_data->image;
8455        fb_node->attachments.push_back(fb_info);
8456    }
8457    dev_data->frameBufferMap[fb] = std::move(fb_node);
8458}
8459
8460VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
8461                                                 const VkAllocationCallbacks *pAllocator,
8462                                                 VkFramebuffer *pFramebuffer) {
8463    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8464    std::unique_lock<std::mutex> lock(global_lock);
8465    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
8466    lock.unlock();
8467
8468    if (skip_call)
8469        return VK_ERROR_VALIDATION_FAILED_EXT;
8470
8471    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
8472
8473    if (VK_SUCCESS == result) {
8474        lock.lock();
8475        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
8476        lock.unlock();
8477    }
8478    return result;
8479}
8480
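// Depth-first search backwards through the subpass DAG's prev edges: returns true if
// 'dependent' is reachable from 'index', i.e. some chain of declared dependencies
// already orders the two subpasses.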
8481static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
8482                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
8484    if (processed_nodes.count(index))
8485        return false;
8486    processed_nodes.insert(index);
8487    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; else recurse on the previous nodes.
8489    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
8490        for (auto elem : node.prev) {
8491            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
8492                return true;
8493        }
8494    } else {
8495        return true;
8496    }
8497    return false;
8498}
8499
8500static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
8501                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
8502    bool result = true;
8503    // Loop through all subpasses that share the same attachment and make sure a dependency exists
8504    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
8505        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
8506            continue;
8507        const DAGNode &node = subpass_to_node[subpass];
8508        // Check for a specified dependency between the two nodes. If one exists we are done.
8509        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
8510        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
8511        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no dependency exists, an implicit one still might. If neither does, report an error.
8513            std::unordered_set<uint32_t> processed_nodes;
8514            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
8515                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
8516                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8517                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8518                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
8519                                     dependent_subpasses[k]);
8520                result = false;
8521            }
8522        }
8523    }
8524    return result;
8525}
8526
8527static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
8528                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
8529    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as subsequent nodes need to preserve the attachment.
8531    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8532    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8533        if (attachment == subpass.pColorAttachments[j].attachment)
8534            return true;
8535    }
8536    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8537        if (attachment == subpass.pDepthStencilAttachment->attachment)
8538            return true;
8539    }
8540    bool result = false;
8541    // Loop through previous nodes and see if any of them write to the attachment.
8542    for (auto elem : node.prev) {
8543        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
8544    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
8546    if (result && depth > 0) {
8547        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
8548        bool has_preserved = false;
8549        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8550            if (subpass.pPreserveAttachments[j] == attachment) {
8551                has_preserved = true;
8552                break;
8553            }
8554        }
8555        if (!has_preserved) {
8556            skip_call |=
8557                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8558                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8559                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
8560        }
8561    }
8562    return result;
8563}
8564
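// Overlap test for half-open ranges [offset, offset + size): for example, mip ranges
// {base 0, count 4} and {base 2, count 6} overlap, while {base 0, count 4} and
// {base 4, count 4} do not.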
8565template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
8568}
8569
8570bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
8571    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
8572            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
8573}
8574
8575static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
8576                                 RENDER_PASS_NODE const * renderPass) {
8577    bool skip_call = false;
8578    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
8579    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
8580    auto const & subpass_to_node = renderPass->subpassToNode;
8581    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
8582    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
8583    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
8584    // Find overlapping attachments
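    // Two attachments alias when they use the same view, views of the same image with
    // overlapping subresource ranges, or images bound to overlapping byte ranges of the
    // same VkDeviceMemory.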
8585    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
8586        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
8587            VkImageView viewi = pFramebufferInfo->pAttachments[i];
8588            VkImageView viewj = pFramebufferInfo->pAttachments[j];
8589            if (viewi == viewj) {
8590                overlapping_attachments[i].push_back(j);
8591                overlapping_attachments[j].push_back(i);
8592                continue;
8593            }
8594            auto view_data_i = getImageViewData(my_data, viewi);
8595            auto view_data_j = getImageViewData(my_data, viewj);
8596            if (!view_data_i || !view_data_j) {
8597                continue;
8598            }
8599            if (view_data_i->image == view_data_j->image &&
8600                isRegionOverlapping(view_data_i->subresourceRange, view_data_j->subresourceRange)) {
8601                overlapping_attachments[i].push_back(j);
8602                overlapping_attachments[j].push_back(i);
8603                continue;
8604            }
8605            auto image_data_i = getImageNode(my_data, view_data_i->image);
8606            auto image_data_j = getImageNode(my_data, view_data_j->image);
8607            if (!image_data_i || !image_data_j) {
8608                continue;
8609            }
8610            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
8611                                                                             image_data_j->memOffset, image_data_j->memSize)) {
8612                overlapping_attachments[i].push_back(j);
8613                overlapping_attachments[j].push_back(i);
8614            }
8615        }
8616    }
8617    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
8618        uint32_t attachment = i;
8619        for (auto other_attachment : overlapping_attachments[i]) {
8620            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8621                skip_call |=
8622                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8623                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8624                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8625                            attachment, other_attachment);
8626            }
8627            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
8628                skip_call |=
8629                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8630                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
8631                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
8632                            other_attachment, attachment);
8633            }
8634        }
8635    }
    // For each attachment, find the subpasses that use it.
8637    unordered_set<uint32_t> attachmentIndices;
8638    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8639        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8640        attachmentIndices.clear();
8641        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8642            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8643            if (attachment == VK_ATTACHMENT_UNUSED)
8644                continue;
8645            input_attachment_to_subpass[attachment].push_back(i);
8646            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8647                input_attachment_to_subpass[overlapping_attachment].push_back(i);
8648            }
8649        }
8650        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8651            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8652            if (attachment == VK_ATTACHMENT_UNUSED)
8653                continue;
8654            output_attachment_to_subpass[attachment].push_back(i);
8655            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8656                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8657            }
8658            attachmentIndices.insert(attachment);
8659        }
8660        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8661            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8662            output_attachment_to_subpass[attachment].push_back(i);
8663            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
8664                output_attachment_to_subpass[overlapping_attachment].push_back(i);
8665            }
8666
8667            if (attachmentIndices.count(attachment)) {
8668                skip_call |=
8669                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8670                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8671                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
8672                            attachment, i);
8673            }
8674        }
8675    }
8676    // If there is a dependency needed make sure one exists
8677    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8678        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8679        // If the attachment is an input then all subpasses that output must have a dependency relationship
8680        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8681            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8682            if (attachment == VK_ATTACHMENT_UNUSED)
8683                continue;
8684            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8685        }
8686        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
8687        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8688            uint32_t attachment = subpass.pColorAttachments[j].attachment;
8689            if (attachment == VK_ATTACHMENT_UNUSED)
8690                continue;
8691            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8692            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8693        }
8694        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8695            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
8696            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8697            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
8698        }
8699    }
    // Check implicit dependencies: if a subpass reads an attachment, make sure the
    // attachment is preserved by every intermediate subpass since it was last written.
8702    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8703        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8704        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8705            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
8706        }
8707    }
8708    return skip_call;
8709}

// ValidateLayoutVsAttachmentDescription validates state associated with the
// VkAttachmentDescription structs used by a renderpass's subpasses. The initial check
// ensures that attachments whose first layout is READ_ONLY do not use CLEAR as their loadOp.
8713static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
8714                                                  const uint32_t attachment,
8715                                                  const VkAttachmentDescription &attachment_description) {
8716    bool skip_call = false;
8717    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
8718    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
8719        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
8720            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
8721            skip_call |=
8722                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
8723                        VkDebugReportObjectTypeEXT(0), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8724                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
8725        }
8726    }
8727    return skip_call;
8728}
8729
8730static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
8731    bool skip = false;
8732
8733    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8734        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8735        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8736            auto attach_index = subpass.pInputAttachments[j].attachment;
8737            if (attach_index == VK_ATTACHMENT_UNUSED)
8738                continue;
8739
8740            if (subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL &&
8741                subpass.pInputAttachments[j].layout != VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
8742                if (subpass.pInputAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8743                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8744                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8745                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8746                                    "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
8747                } else {
8748                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8749                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8750                                    "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
8751                                    string_VkImageLayout(subpass.pInputAttachments[j].layout));
8752                }
8753            }
8754            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout, attach_index,
8755                                                          pCreateInfo->pAttachments[attach_index]);
8756        }
8757        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8758            auto attach_index = subpass.pColorAttachments[j].attachment;
8759            if (attach_index == VK_ATTACHMENT_UNUSED)
8760                continue;
8761
8762            if (subpass.pColorAttachments[j].layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
8763                if (subpass.pColorAttachments[j].layout == VK_IMAGE_LAYOUT_GENERAL) {
8764                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8765                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8766                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8767                                    "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
8768                } else {
8769                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8770                                    DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8771                                    "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
8772                                    string_VkImageLayout(subpass.pColorAttachments[j].layout));
8773                }
8774            }
8775            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout, attach_index,
8776                                                          pCreateInfo->pAttachments[attach_index]);
8777        }
8778        if ((subpass.pDepthStencilAttachment != NULL) && (subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
8779            if (subpass.pDepthStencilAttachment->layout != VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL) {
8780                if (subpass.pDepthStencilAttachment->layout == VK_IMAGE_LAYOUT_GENERAL) {
8781                    // TODO: Verify Valid Use in spec. I believe this is allowed (valid) but may not be optimal performance
8782                    skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8783                                    (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8784                                    "Layout for depth attachment is GENERAL but should be DEPTH_STENCIL_ATTACHMENT_OPTIMAL.");
8785                } else {
8786                    skip |=
8787                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8788                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
8789                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL or GENERAL.",
8790                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
8791                }
8792            }
8793            auto attach_index = subpass.pDepthStencilAttachment->attachment;
8794            skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
8795                                                          attach_index, pCreateInfo->pAttachments[attach_index]);
8796        }
8797    }
8798    return skip;
8799}
8800
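// Build the subpass dependency DAG: one node per subpass, with prev/next edge lists
// filled in from pCreateInfo->pDependencies (VK_SUBPASS_EXTERNAL endpoints get no node),
// and record which subpasses declare self-dependencies.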
8801static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8802                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
8803    bool skip_call = false;
8804    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8805        DAGNode &subpass_node = subpass_to_node[i];
8806        subpass_node.pass = i;
8807    }
8808    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
8809        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
8810        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
8811            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8812            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8813                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
8814                                 "Depedency graph must be specified such that an earlier pass cannot depend on a later pass.");
8815        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
8816            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8817                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
8818        } else if (dependency.srcSubpass == dependency.dstSubpass) {
8819            has_self_dependency[dependency.srcSubpass] = true;
8820        }
8821        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
8822            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
8823        }
8824        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
8825            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
8826        }
8827    }
8828    return skip_call;
8829}
8830
8831
8832VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
8833                                                  const VkAllocationCallbacks *pAllocator,
8834                                                  VkShaderModule *pShaderModule) {
8835    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8836    bool skip_call = false;
8837
8838    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
8839    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
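    // VkShaderModuleCreateInfo::codeSize is in bytes; SPIRV-Tools expects a word count.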
8840    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
8841    spv_diagnostic diag = nullptr;
8842
8843    auto result = spvValidate(ctx, &binary, &diag);
8844    if (result != SPV_SUCCESS) {
8845        skip_call |= log_msg(my_data->report_data,
8846                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
8847                             VkDebugReportObjectTypeEXT(0), 0,
8848                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
8849                             diag && diag->error ? diag->error : "(no error text)");
8850    }
8851
8852    spvDiagnosticDestroy(diag);
8853    spvContextDestroy(ctx);
8854
8855    if (skip_call)
8856        return VK_ERROR_VALIDATION_FAILED_EXT;
8857
8858    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
8859
8860    if (res == VK_SUCCESS) {
8861        std::lock_guard<std::mutex> lock(global_lock);
8862        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
8863    }
8864    return res;
8865}
8866
8867static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
8868    bool skip_call = false;
8869    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
8870        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8871                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
8872                             "CreateRenderPass: %s attachment %d cannot be greater than the total number of attachments %d.",
8873                             type, attachment, attachment_count);
8874    }
8875    return skip_call;
8876}
8877
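// x & (x - 1) clears the lowest set bit, so the result is zero exactly when x has a
// single bit set (and x itself is non-zero).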
8878static bool IsPowerOfTwo(unsigned x) {
8879    return x && !(x & (x-1));
8880}
8881
8882static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
8883    bool skip_call = false;
8884    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
8885        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
8886        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
8887            skip_call |=
8888                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8889                        DRAWSTATE_INVALID_RENDERPASS, "DS",
8890                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
8891        }
8892        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
8893            uint32_t attachment = subpass.pPreserveAttachments[j];
8894            if (attachment == VK_ATTACHMENT_UNUSED) {
8895                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8896                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
8897                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
8898            } else {
8899                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
8900            }
8901        }
8902
8903        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
8904            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
8905            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
8906
8907        unsigned sample_count = 0;
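        // VkSampleCountFlagBits values are single bits; OR-ing together the sample count
        // of every color and depth/stencil attachment yields a power of two only when all
        // of them agree.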
8908
8909        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
8910            uint32_t attachment;
8911            if (subpass.pResolveAttachments) {
8912                attachment = subpass.pResolveAttachments[j].attachment;
8913                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
8914
8915                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
8916                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
8917                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
8918                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8919                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
8920                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
8921                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
8922                }
8923            }
8924            attachment = subpass.pColorAttachments[j].attachment;
8925            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
8926
8927            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
8928                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
8929
8930                if (subpass_performs_resolve &&
8931                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
8932                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
8933                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8934                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
8935                                         "which has VK_SAMPLE_COUNT_1_BIT",
8936                                         i, attachment);
8937                }
8938            }
8939        }
8940
8941        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
8942            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
8943            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
8944
8945            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
8946                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
8947            }
8948        }
8949
8950        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
8951            uint32_t attachment = subpass.pInputAttachments[j].attachment;
8952            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
8953        }
8954
8955        if (sample_count && !IsPowerOfTwo(sample_count)) {
8956            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
8957                                 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
8958                                 "CreateRenderPass:  Subpass %u attempts to render to "
8959                                 "attachments with inconsistent sample counts",
8960                                 i);
8961        }
8962    }
8963    return skip_call;
8964}
8965
8966VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
8967                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
8968    bool skip_call = false;
8969    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
8970
8971    std::unique_lock<std::mutex> lock(global_lock);
8972
8973    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
8974    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
8975    //       ValidateLayouts.
8976    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
8977    lock.unlock();
8978
8979    if (skip_call) {
8980        return VK_ERROR_VALIDATION_FAILED_EXT;
8981    }
8982
8983    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
8984
8985    if (VK_SUCCESS == result) {
8986        lock.lock();
8987
8988        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
8989        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
8990        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
8991
8992        // Shadow create info and store in map
8993        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
8994        if (pCreateInfo->pAttachments) {
8995            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
8996            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
8997                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
8998        }
8999        if (pCreateInfo->pSubpasses) {
9000            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9001            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9002
9003            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9004                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9005                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9006                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9007                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9008                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
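                // All of this subpass's attachment references share the single block allocated
                // above; the 'attachments' cursor advances through it as each array is copied.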
9009
9010                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9011                subpass->pInputAttachments = attachments;
9012                attachments += subpass->inputAttachmentCount;
9013
9014                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9015                subpass->pColorAttachments = attachments;
9016                attachments += subpass->colorAttachmentCount;
9017
9018                if (subpass->pResolveAttachments) {
9019                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9020                    subpass->pResolveAttachments = attachments;
9021                    attachments += subpass->colorAttachmentCount;
9022                }
9023
9024                if (subpass->pDepthStencilAttachment) {
9025                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
9026                    subpass->pDepthStencilAttachment = attachments;
9027                    attachments += 1;
9028                }
9029
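                // Note: pPreserveAttachments is an array of uint32_t attachment indices, not
                // VkAttachmentReference structs, so only sizeof(uint32_t) bytes per element may
                // be copied from the application's array.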
9030                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
9031                subpass->pPreserveAttachments = &attachments->attachment;
9032            }
9033        }
9034        if (pCreateInfo->pDependencies) {
9035            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
9036            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
9037                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
9038        }
9039
9040        auto render_pass = new RENDER_PASS_NODE(localRPCI);
9041        render_pass->renderPass = *pRenderPass;
9042        render_pass->hasSelfDependency = has_self_dependency;
9043        render_pass->subpassToNode = subpass_to_node;
9044#if MTMERGESOURCE
9045        // MTMTODO : Merge with code from above to eliminate duplication
9046        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9047            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
9048            MT_PASS_ATTACHMENT_INFO pass_info;
9049            pass_info.load_op = desc.loadOp;
9050            pass_info.store_op = desc.storeOp;
9051            pass_info.stencil_load_op = desc.stencilLoadOp;
9052            pass_info.stencil_store_op = desc.stencilStoreOp;
9053            pass_info.attachment = i;
9054            render_pass->attachments.push_back(pass_info);
9055        }
9056        // TODO: Maybe fill list and then copy instead of locking
9057        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
9058        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
9059        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9060            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9061            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9062                uint32_t attachment = subpass.pColorAttachments[j].attachment;
9063                if (!attachment_first_read.count(attachment)) {
9064                    attachment_first_read.insert(std::make_pair(attachment, false));
9065                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
9066                }
9067            }
9068            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9069                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9070                if (!attachment_first_read.count(attachment)) {
9071                    attachment_first_read.insert(std::make_pair(attachment, false));
9072                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
9073                }
9074            }
9075            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9076                uint32_t attachment = subpass.pInputAttachments[j].attachment;
9077                if (!attachment_first_read.count(attachment)) {
9078                    attachment_first_read.insert(std::make_pair(attachment, true));
9079                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
9080                }
9081            }
9082        }
9083#endif
9084        dev_data->renderPassMap[*pRenderPass] = render_pass;
9085    }
9086    return result;
9087}
9088
9089// Free the renderpass shadow
9090static void deleteRenderPasses(layer_data *my_data) {
9091    for (auto renderPass : my_data->renderPassMap) {
9092        const VkRenderPassCreateInfo *pRenderPassInfo = renderPass.second->pCreateInfo;
9093        delete[] pRenderPassInfo->pAttachments;
9094        if (pRenderPassInfo->pSubpasses) {
9095            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
9096                // Attachments are all allocated in one block, so we only need to
9097                //  find the first non-null pointer to delete
9098                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
9099                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
9100                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
9101                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
9102                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
9103                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
9104                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
9105                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
9106                }
9107            }
9108            delete[] pRenderPassInfo->pSubpasses;
9109        }
9110        delete[] pRenderPassInfo->pDependencies;
9111        delete pRenderPassInfo;
9112        delete renderPass.second;
9113    }
9114    my_data->renderPassMap.clear();
9115}
9116
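// Verify that each framebuffer attachment's last known layout in this command buffer matches the
// render pass's declared initialLayout; an initialLayout of VK_IMAGE_LAYOUT_UNDEFINED accepts any
// previous layout.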
9117static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9118    bool skip_call = false;
9119    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
9120    const safe_VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
9121    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
9122        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9123                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
9124                                                                 "with a different number of attachments.");
9125    }
9126    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9127        const VkImageView &image_view = framebufferInfo.pAttachments[i];
9128        auto image_data = getImageViewData(dev_data, image_view);
9129        assert(image_data);
9130        const VkImage &image = image_data->image;
9131        const VkImageSubresourceRange &subRange = image_data->subresourceRange;
9132        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
9133                                             pRenderPassInfo->pAttachments[i].initialLayout};
9134        // TODO: Do not iterate over every possibility - consolidate where possible
9135        for (uint32_t j = 0; j < subRange.levelCount; j++) {
9136            uint32_t level = subRange.baseMipLevel + j;
9137            for (uint32_t k = 0; k < subRange.layerCount; k++) {
9138                uint32_t layer = subRange.baseArrayLayer + k;
9139                VkImageSubresource sub = {subRange.aspectMask, level, layer};
9140                IMAGE_CMD_BUF_LAYOUT_NODE node;
9141                if (!FindLayout(pCB, image, sub, node)) {
9142                    SetLayout(pCB, image, sub, newNode);
9143                    continue;
9144                }
9145                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
9146                    newNode.layout != node.layout) {
9147                    skip_call |=
9148                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9149                                DRAWSTATE_INVALID_RENDERPASS, "DS",
9150                                "You cannot start a render pass using attachment %u "
9151                                "where the render pass initial layout is %s and the previous "
9152                                "known layout of the attachment is %s. The layouts must match, or "
9153                                "the render pass initial layout for the attachment must be "
9154                                "VK_IMAGE_LAYOUT_UNDEFINED",
9155                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
9156                }
9157            }
9158        }
9159    }
9160    return skip_call;
9161}
9162
9163static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
9164                                          FRAMEBUFFER_NODE *pFramebuffer,
9165                                          VkAttachmentReference ref) {
9167    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
9168        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
9169        SetLayout(dev_data, pCB, image_view, ref.layout);
9170    }
9171}
9172
9173static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
9174                                     const int subpass_index) {
9175    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9176    if (!renderPass)
9177        return;
9178
9179    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9180    if (!framebuffer)
9181        return;
9182
9183    const safe_VkFramebufferCreateInfo &framebufferInfo = framebuffer->createInfo;
9184    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
9185    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9186        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
9187    }
9188    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9189        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
9190    }
9191    if (subpass.pDepthStencilAttachment) {
9192        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
9193    }
9194}
9195
9196static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
9197    bool skip_call = false;
9198    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
9199        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9200                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
9201                             cmd_name.c_str());
9202    }
9203    return skip_call;
9204}
9205
9206static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
9207    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
9208    if (!renderPass)
9209        return;
9210
9211    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
9212    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
9213    if (!framebuffer)
9214        return;
9215
9216    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
9217        auto image_view = framebuffer->createInfo.pAttachments[i];
9218        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
9219    }
9220}
9221
9222static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
9223    bool skip_call = false;
9224    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &getFramebuffer(my_data, pRenderPassBegin->framebuffer)->createInfo;
9225    if (pRenderPassBegin->renderArea.offset.x < 0 ||
9226        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
9227        pRenderPassBegin->renderArea.offset.y < 0 ||
9228        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
9229        skip_call |= static_cast<bool>(log_msg(
9230            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9231            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
9232            "Cannot execute a render pass with renderArea not within the bounds of the "
9233            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
9234            "height %d.",
9235            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
9236            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
9237    }
9238    return skip_call;
9239}
9240
9241// For a stencil-only format only the stencil[Load|Store]Op matters; for color and depth formats the plain
9242// [load|store]Op applies, and combined depth/stencil formats must check both ops.
9243// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
9244template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
9245    if (color_depth_op != op && stencil_op != op) {
9246        return false;
9247    }
9248    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
9249    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
9250
9251    return ((check_color_depth_load_op && (color_depth_op == op)) ||
9252            (check_stencil_load_op && (stencil_op == op)));
9253}
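// Example: for a combined depth/stencil format with loadOp = LOAD and stencilLoadOp = CLEAR, the
// function returns true for both VK_ATTACHMENT_LOAD_OP_LOAD and VK_ATTACHMENT_LOAD_OP_CLEAR,
// since both aspects are checked for such formats.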
9254
9255VKAPI_ATTR void VKAPI_CALL
9256CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
9257    bool skipCall = false;
9258    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9259    std::unique_lock<std::mutex> lock(global_lock);
9260    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9261    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
9262    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
9263    if (pCB) {
9264        if (renderPass) {
9265            uint32_t clear_op_count = 0;
9266            pCB->activeFramebuffer = pRenderPassBegin->framebuffer;
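            // Queue deferred per-attachment validity checks keyed off the effective load op:
            // CLEAR marks the backing memory valid, DONT_CARE marks it invalid, and LOAD requires
            // that the memory already be valid when the command buffer is submitted.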
9267            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
9268                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9269                VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format;
9270                if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9271                                                         renderPass->attachments[i].stencil_load_op,
9272                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
9273                    ++clear_op_count;
9274                    std::function<bool()> function = [=]() {
9275                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9276                        return false;
9277                    };
9278                    pCB->validate_functions.push_back(function);
9279                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9280                                                                renderPass->attachments[i].stencil_load_op,
9281                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
9282                    std::function<bool()> function = [=]() {
9283                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9284                        return false;
9285                    };
9286                    pCB->validate_functions.push_back(function);
9287                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
9288                                                                renderPass->attachments[i].stencil_load_op,
9289                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
9290                    std::function<bool()> function = [=]() {
9291                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9292                    };
9293                    pCB->validate_functions.push_back(function);
9294                }
9295                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
9296                    std::function<bool()> function = [=]() {
9297                        return validate_memory_is_valid(dev_data, fb_info.mem, "vkCmdBeginRenderPass()", fb_info.image);
9298                    };
9299                    pCB->validate_functions.push_back(function);
9300                }
9301            }
9302            if (clear_op_count > pRenderPassBegin->clearValueCount) {
9303                skipCall |= log_msg(
9304                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9305                    reinterpret_cast<uint64_t &>(renderPass->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9306                    "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but the actual number "
9307                    "of attachments in renderPass 0x%" PRIx64 " that use VK_ATTACHMENT_LOAD_OP_CLEAR is %u. The clearValueCount "
9308                    "must therefore be greater than or equal to %u.",
9309                    pRenderPassBegin->clearValueCount, reinterpret_cast<uint64_t &>(renderPass->renderPass), clear_op_count, clear_op_count);
9310            }
9311            skipCall |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
9312            skipCall |= VerifyFramebufferAndRenderPassLayouts(dev_data, pCB, pRenderPassBegin);
9313            skipCall |= insideRenderPass(dev_data, pCB, "vkCmdBeginRenderPass");
9314            skipCall |= ValidateDependencies(dev_data, framebuffer, renderPass);
9315            skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdBeginRenderPass");
9316            skipCall |= addCmd(dev_data, pCB, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
9317            pCB->activeRenderPass = renderPass;
9318            // This is a shallow copy as that is all that is needed for now
9319            pCB->activeRenderPassBeginInfo = *pRenderPassBegin;
9320            pCB->activeSubpass = 0;
9321            pCB->activeSubpassContents = contents;
9322            pCB->framebuffers.insert(pRenderPassBegin->framebuffer);
9323            // Connect this framebuffer to this cmdBuffer
9324            framebuffer->cb_bindings.insert(pCB);
9325
9326            // transition attachments to the correct layouts for the first subpass
9327            TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9328        } else {
9329            skipCall |=
9330                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9331                            DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
9332        }
9333    }
9334    lock.unlock();
9335    if (!skipCall) {
9336        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
9337    }
9338}
9339
9340VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
9341    bool skipCall = false;
9342    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9343    std::unique_lock<std::mutex> lock(global_lock);
9344    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9345    if (pCB) {
9346        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
9347        skipCall |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
9348        pCB->activeSubpass++;
9349        pCB->activeSubpassContents = contents;
9350        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
9351        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
9352    }
9353    lock.unlock();
9354    if (!skipCall)
9355        dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
9356}
9357
9358VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
9359    bool skipCall = false;
9360    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9361    std::unique_lock<std::mutex> lock(global_lock);
9362    auto pCB = getCBNode(dev_data, commandBuffer);
9363    if (pCB) {
9364        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
9365        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
9366        if (pRPNode) {
9367            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
9368                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
9369                VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format;
9370                if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9371                                                         pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) {
9372                    std::function<bool()> function = [=]() {
9373                        set_memory_valid(dev_data, fb_info.mem, true, fb_info.image);
9374                        return false;
9375                    };
9376                    pCB->validate_functions.push_back(function);
9377                } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
9378                                                                pRPNode->attachments[i].stencil_store_op,
9379                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
9380                    std::function<bool()> function = [=]() {
9381                        set_memory_valid(dev_data, fb_info.mem, false, fb_info.image);
9382                        return false;
9383                    };
9384                    pCB->validate_functions.push_back(function);
9385                }
9386            }
9387        }
9388        skipCall |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
9389        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
9390        skipCall |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
9391        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
9392        pCB->activeRenderPass = nullptr;
9393        pCB->activeSubpass = 0;
9394        pCB->activeFramebuffer = VK_NULL_HANDLE;
9395    }
9396    lock.unlock();
9397    if (!skipCall)
9398        dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
9399}
9400
9401static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9402                                        RENDER_PASS_NODE const *primaryPass, uint32_t primaryAttach, uint32_t secondaryAttach,
9403                                        const char *msg) {
9404    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9405                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9406                   "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
9407                   " that is not compatible with the current render pass 0x%" PRIx64 ". "
9408                   "Attachment %" PRIu32 " is not compatible with %" PRIu32 ". %s",
9409                   (void *)secondaryBuffer, (uint64_t)(secondaryPass->renderPass), (uint64_t)(primaryPass->renderPass), primaryAttach, secondaryAttach,
9410                   msg);
9411}
9412
9413static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9414                                            uint32_t primaryAttach, VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass,
9415                                            uint32_t secondaryAttach, bool is_multi) {
9416    bool skip_call = false;
9417    if (primaryPass->pCreateInfo->attachmentCount <= primaryAttach) {
9418        primaryAttach = VK_ATTACHMENT_UNUSED;
9419    }
9420    if (secondaryPass->pCreateInfo->attachmentCount <= secondaryAttach) {
9421        secondaryAttach = VK_ATTACHMENT_UNUSED;
9422    }
9423    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
9424        return skip_call;
9425    }
9426    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
9427        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9428                                                 secondaryAttach, "The first is unused while the second is not.");
9429        return skip_call;
9430    }
9431    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
9432        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9433                                                 secondaryAttach, "The second is unused while the first is not.");
9434        return skip_call;
9435    }
9436    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].format !=
9437        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].format) {
9438        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9439                                                 secondaryAttach, "They have different formats.");
9440    }
9441    if (primaryPass->pCreateInfo->pAttachments[primaryAttach].samples !=
9442        secondaryPass->pCreateInfo->pAttachments[secondaryAttach].samples) {
9443        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9444                                                 secondaryAttach, "They have different samples.");
9445    }
9446    if (is_multi &&
9447        primaryPass->pCreateInfo->pAttachments[primaryAttach].flags !=
9448            secondaryPass->pCreateInfo->pAttachments[secondaryAttach].flags) {
9449        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, secondaryPass, primaryPass, primaryAttach,
9450                                                 secondaryAttach, "They have different flags.");
9451    }
9452    return skip_call;
9453}
9454
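// Subpass compatibility walks the input, color, resolve, and depth/stencil references of both
// passes in parallel; arrays of different lengths are compared as if the shorter one were padded
// with VK_ATTACHMENT_UNUSED, matching the spec's render pass compatibility rules.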
9455static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, RENDER_PASS_NODE const *primaryPass,
9456                                         VkCommandBuffer secondaryBuffer, RENDER_PASS_NODE const *secondaryPass, const int subpass,
9457                                         bool is_multi) {
9458    bool skip_call = false;
9459    const VkSubpassDescription &primary_desc = primaryPass->pCreateInfo->pSubpasses[subpass];
9460    const VkSubpassDescription &secondary_desc = secondaryPass->pCreateInfo->pSubpasses[subpass];
9461    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
9462    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
9463        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
9464        if (i < primary_desc.inputAttachmentCount) {
9465            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
9466        }
9467        if (i < secondary_desc.inputAttachmentCount) {
9468            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
9469        }
9470        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_input_attach, secondaryBuffer,
9471                                                     secondaryPass, secondary_input_attach, is_multi);
9472    }
9473    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
9474    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
9475        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
9476        if (i < primary_desc.colorAttachmentCount) {
9477            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
9478        }
9479        if (i < secondary_desc.colorAttachmentCount) {
9480            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
9481        }
9482        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_color_attach, secondaryBuffer,
9483                                                     secondaryPass, secondary_color_attach, is_multi);
9484        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
9485        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
9486            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
9487        }
9488        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
9489            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
9490        }
9491        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_resolve_attach, secondaryBuffer,
9492                                                     secondaryPass, secondary_resolve_attach, is_multi);
9493    }
9494    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
9495    if (primary_desc.pDepthStencilAttachment) {
9496        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
9497    }
9498    if (secondary_desc.pDepthStencilAttachment) {
9499        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
9500    }
9501    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPass, primary_depthstencil_attach, secondaryBuffer,
9502                                                 secondaryPass, secondary_depthstencil_attach, is_multi);
9503    return skip_call;
9504}
9505
9506static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer, VkRenderPass primaryPass,
9507                                            VkCommandBuffer secondaryBuffer, VkRenderPass secondaryPass) {
9508    bool skip_call = false;
9509    // Early exit if renderPass objects are identical (and therefore compatible)
9510    if (primaryPass == secondaryPass)
9511        return skip_call;
9512    auto primary_render_pass = getRenderPass(dev_data, primaryPass);
9513    auto secondary_render_pass = getRenderPass(dev_data, secondaryPass);
9514    if (!primary_render_pass) {
9515        skip_call |=
9516            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9517                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9518                    "vkCmdExecuteCommands() called w/ invalid current Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9519                    (void *)primaryBuffer, (uint64_t)(primaryPass));
9520        return skip_call;
9521    }
9522    if (!secondary_render_pass) {
9523        skip_call |=
9524            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9525                    DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9526                    "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%p which has invalid render pass 0x%" PRIx64 ".",
9527                    (void *)secondaryBuffer, (uint64_t)(secondaryPass));
9528        return skip_call;
9529    }
9530    if (primary_render_pass->pCreateInfo->subpassCount != secondary_render_pass->pCreateInfo->subpassCount) {
9531        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9532                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9533                             "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a render pass 0x%" PRIx64
9534                             " that is not compatible with the current render pass 0x%" PRIx64 ". "
9535                             "They have a different number of subpasses.",
9536                             (void *)secondaryBuffer, (uint64_t)(secondaryPass), (uint64_t)(primaryPass));
9537        return skip_call;
9538    }
9539    auto subpassCount = primary_render_pass->pCreateInfo->subpassCount;
9540    for (uint32_t i = 0; i < subpassCount; ++i) {
9541        skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primary_render_pass, secondaryBuffer,
9542                                                  secondary_render_pass, i, subpassCount > 1);
9543    }
9544    return skip_call;
9545}
9546
9547static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
9548                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
9549    bool skip_call = false;
9550    if (!pSubCB->beginInfo.pInheritanceInfo) {
9551        return skip_call;
9552    }
9553    VkFramebuffer primary_fb = pCB->activeFramebuffer;
9554    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
9555    if (secondary_fb != VK_NULL_HANDLE) {
9556        if (primary_fb != secondary_fb) {
9557            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9558                                 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9559                                 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p which has a framebuffer 0x%" PRIx64
9560                                 " that is not compatible with the current framebuffer 0x%" PRIx64 ".",
9561                                 (void *)secondaryBuffer, (uint64_t)(secondary_fb), (uint64_t)(primary_fb));
9562        }
9563        auto fb = getFramebuffer(dev_data, secondary_fb);
9564        if (!fb) {
9565            skip_call |=
9566                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9567                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9568                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
9569                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
9570            return skip_call;
9571        }
9572        skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->createInfo.renderPass,
9573                                                     secondaryBuffer, pSubCB->beginInfo.pInheritanceInfo->renderPass);
9574    }
9575    return skip_call;
9576}
9577
9578static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
9579    bool skipCall = false;
9580    unordered_set<int> activeTypes;
9581    for (auto queryObject : pCB->activeQueries) {
9582        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9583        if (queryPoolData != dev_data->queryPoolMap.end()) {
9584            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
9585                pSubCB->beginInfo.pInheritanceInfo) {
9586                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
9587                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
9588                    skipCall |= log_msg(
9589                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9590                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9591                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9592                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried, so the "
9593                        "secondary command buffer's pipelineStatistics must be a subset of the query pool's statistics flags.",
9594                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
9595                }
9596            }
9597            activeTypes.insert(queryPoolData->second.createInfo.queryType);
9598        }
9599    }
9600    for (auto queryObject : pSubCB->startedQueries) {
9601        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
9602        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
9603            skipCall |=
9604                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9605                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9606                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
9607                        "which has invalid active query pool 0x%" PRIx64 " of type %d, but a query of that type has been started on "
9608                        "secondary Cmd Buffer 0x%p.",
9609                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
9610                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
9611        }
9612    }
9613    return skipCall;
9614}
9615
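// CmdExecuteCommands: every element of pCommandBuffers must be a secondary command buffer. When
// recording inside a render pass, each secondary must also have RENDER_PASS_CONTINUE_BIT and a
// compatible render pass/framebuffer; simultaneous-use and inherited-query rules are checked too.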
9616VKAPI_ATTR void VKAPI_CALL
9617CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
9618    bool skipCall = false;
9619    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9620    std::unique_lock<std::mutex> lock(global_lock);
9621    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9622    if (pCB) {
9623        GLOBAL_CB_NODE *pSubCB = NULL;
9624        for (uint32_t i = 0; i < commandBuffersCount; i++) {
9625            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
9626            if (!pSubCB) {
9627                skipCall |=
9628                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9629                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9630                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
9631                            (void *)pCommandBuffers[i], i);
9632            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
9633                skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9634                                    __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
9635                                    "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
9636                                    "array. All cmd buffers in pCommandBuffers array must be secondary.",
9637                                    (void *)pCommandBuffers[i], i);
9638            } else if (pCB->activeRenderPass) { // Secondary CB within a render pass must have RENDER_PASS_CONTINUE_BIT set
9639                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
9640                    skipCall |= log_msg(
9641                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9642                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
9643                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
9644                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
9645                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
9646                } else {
9647                    // Make sure render pass is compatible with parent command buffer pass if has continue
9648                    skipCall |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->renderPass, pCommandBuffers[i],
9649                                                                pSubCB->beginInfo.pInheritanceInfo->renderPass);
9650                    skipCall |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
9651                }
9652                string errorString = "";
9653                if (!verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->renderPass,
9654                                                     pSubCB->beginInfo.pInheritanceInfo->renderPass, errorString)) {
9655                    skipCall |= log_msg(
9656                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9657                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9658                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
9659                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
9660                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
9661                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
9662                }
9663                //  If framebuffer for secondary CB is not NULL, then it must match FB from vkCmdBeginRenderPass()
9664                //   that this CB will be executed in AND framebuffer must have been created w/ RP compatible w/ renderpass
9665                if (pSubCB->beginInfo.pInheritanceInfo->framebuffer) {
9666                    if (pSubCB->beginInfo.pInheritanceInfo->framebuffer != pCB->activeRenderPassBeginInfo.framebuffer) {
9667                        skipCall |= log_msg(
9668                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9669                            (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
9670                            "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) references framebuffer (0x%" PRIxLEAST64
9671                            ") that does not match framebuffer (0x%" PRIxLEAST64 ") in active renderpass (0x%" PRIxLEAST64 ").",
9672                            (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->framebuffer,
9673                            (uint64_t)pCB->activeRenderPassBeginInfo.framebuffer, (uint64_t)pCB->activeRenderPass->renderPass);
9674                    }
9675                }
9676            }
9677            // TODO(mlentine): Move more logic into this method
9678            skipCall |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
9679            skipCall |= validateCommandBufferState(dev_data, pSubCB);
9680            // Secondary cmdBuffers are considered pending execution from the
9681            // time they are recorded
9682            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
9683                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
9684                    skipCall |= log_msg(
9685                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9686                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9687                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9688                        "set!",
9689                        (uint64_t)(pCB->commandBuffer));
9690                }
9691                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
9692                    // Warn that executing a secondary CB without SIMULTANEOUS_USE_BIT forces the primary CB to drop that bit
9693                    skipCall |= log_msg(
9694                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9695                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
9696                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
9697                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
9698                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
9699                                          "set, even though it does.",
9700                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
9701                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
9702                }
9703            }
9704            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
9705                skipCall |=
9706                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
9707                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
9708                            "vkCmdExecuteCommands(): Secondary Command Buffer "
9709                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
9710                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
9711                            "flight when inherited queries are not "
9712                            "supported on this device.",
9713            }
9714            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
9715            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
9716            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
9717        }
9718        skipCall |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
9719        skipCall |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
9720    }
9721    lock.unlock();
9722    if (!skipCall)
9723        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
9724}
9725
9726static bool ValidateMapImageLayouts(VkDevice device, VkDeviceMemory mem) {
9727    bool skip_call = false;
9728    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9729    auto mem_info = getMemObjInfo(dev_data, mem);
9730    if ((mem_info) && (mem_info->image != VK_NULL_HANDLE)) {
9731        std::vector<VkImageLayout> layouts;
9732        if (FindLayouts(dev_data, mem_info->image, layouts)) {
9733            for (auto layout : layouts) {
9734                if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
9735                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9736                                         __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
9737                                                                                         "GENERAL or PREINITIALIZED are supported.",
9738                                         string_VkImageLayout(layout));
9739                }
9740            }
9741        }
9742    }
9743    return skip_call;
9744}
9745
9746VKAPI_ATTR VkResult VKAPI_CALL
9747MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
9748    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9749
9750    bool skip_call = false;
9751    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9752    std::unique_lock<std::mutex> lock(global_lock);
9753#if MTMERGESOURCE
9754    DEVICE_MEM_INFO *pMemObj = getMemObjInfo(dev_data, mem);
9755    if (pMemObj) {
9756        pMemObj->valid = true;
9757        if ((dev_data->phys_dev_mem_props.memoryTypes[pMemObj->allocInfo.memoryTypeIndex].propertyFlags &
9758             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
9759            skip_call =
9760                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9761                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
9762                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
9763        }
9764    }
9765    skip_call |= validateMemRange(dev_data, mem, offset, size);
9766#endif
9767    skip_call |= ValidateMapImageLayouts(device, mem);
9768    lock.unlock();
9769
9770    if (!skip_call) {
9771        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
9772        if (VK_SUCCESS == result) {
9773#if MTMERGESOURCE
9774            lock.lock();
9775            storeMemRanges(dev_data, mem, offset, size);
9776            initializeAndTrackMemory(dev_data, mem, size, ppData);
9777            lock.unlock();
9778#endif
9779        }
9780    }
9781    return result;
9782}
9783
9784VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
9785    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9786    bool skipCall = false;
9787
9788    std::unique_lock<std::mutex> lock(global_lock);
9789    skipCall |= deleteMemRanges(my_data, mem);
9790    lock.unlock();
9791    if (!skipCall) {
9792        my_data->device_dispatch_table->UnmapMemory(device, mem);
9793    }
9794}
9795
9796static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
9797                                   const VkMappedMemoryRange *pMemRanges) {
9798    bool skipCall = false;
9799    for (uint32_t i = 0; i < memRangeCount; ++i) {
9800        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9801        if (mem_info) {
9802            if (mem_info->memRange.offset > pMemRanges[i].offset) {
9803                skipCall |=
9804                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
9805                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
9806                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
9807                            "(" PRINTF_SIZE_T_SPECIFIER ").",
9808                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->memRange.offset));
9809            }
9810
9811            const uint64_t my_dataTerminus = (mem_info->memRange.size == VK_WHOLE_SIZE)
9812                                                 ? mem_info->allocInfo.allocationSize
9813                                                 : (mem_info->memRange.offset + mem_info->memRange.size);
9814            if (pMemRanges[i].size != VK_WHOLE_SIZE && (my_dataTerminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
9815                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9816                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9817                                    MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
9818                                                                 ") exceeds the Memory Object's upper-bound "
9819                                                                 "(" PRINTF_SIZE_T_SPECIFIER ").",
9820                                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
9821                                    static_cast<size_t>(my_dataTerminus));
9822            }
9823        }
9824    }
9825    return skipCall;
9826}
9827
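// For non-coherent mappings the layer hands the application a shadow allocation with fill-byte
// guard bands before and after the user range; at flush time both bands are checked for
// overwrites (NoncoherentMemoryFillValue must survive) before the data is copied to the driver.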
9828static bool validateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
9829                                                     const VkMappedMemoryRange *pMemRanges) {
9830    bool skipCall = false;
9831    for (uint32_t i = 0; i < memRangeCount; ++i) {
9832        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
9833        if (mem_info) {
9834            if (mem_info->pData) {
9835                VkDeviceSize size = mem_info->memRange.size;
9836                VkDeviceSize half_size = (size / 2);
9837                char *data = static_cast<char *>(mem_info->pData);
9838                for (VkDeviceSize j = 0; j < half_size; ++j) {
9839                    if (data[j] != NoncoherentMemoryFillValue) {
9840                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9841                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9842                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9843                                            (uint64_t)pMemRanges[i].memory);
9844                    }
9845                }
9846                for (auto j = size + half_size; j < 2 * size; ++j) {
9847                    if (data[j] != NoncoherentMemoryFillValue) {
9848                        skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9849                                            VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
9850                                            MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
9851                                            (uint64_t)pMemRanges[i].memory);
9852                    }
9853                }
9854                memcpy(mem_info->pDriverData, static_cast<void *>(data + (size_t)(half_size)), (size_t)(size));
9855            }
9856        }
9857    }
9858    return skipCall;
9859}
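// Layout assumed by the guard-band checks above (a sketch inferred from the checks
// themselves; it is not documented elsewhere in this file): for non-coherent mappings
// the layer hands the app a shadow allocation with fill-value margins on both sides
// of the real data:
//
//     pData:  [ guard: half_size ][ app data: size ][ guard: half_size ]
//
// Any guard byte that no longer equals NoncoherentMemoryFillValue means the app wrote
// outside its mapped range; the final memcpy pushes only the middle region down to
// the driver's true mapping (pDriverData).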
9860
9861VkResult VKAPI_CALL
9862FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9863    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9864    bool skipCall = false;
9865    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9866
9867    std::unique_lock<std::mutex> lock(global_lock);
9868    skipCall |= validateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
9869    skipCall |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
9870    lock.unlock();
9871    if (!skipCall) {
9872        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
9873    }
9874    return result;
9875}
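// Illustrative app-side sequence that this entry point validates (a hedged sketch,
// not layer code; src/dataSize are app-side placeholders): flushing a host write to
// non-coherent memory before the device reads it.
//
//     void *ptr;
//     vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr);
//     memcpy(ptr, src, (size_t)dataSize);
//     VkMappedMemoryRange range = {};
//     range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
//     range.memory = mem;
//     range.offset = 0;
//     range.size = VK_WHOLE_SIZE;
//     vkFlushMappedMemoryRanges(device, 1, &range);   // lands in the wrapper above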
9876
9877VkResult VKAPI_CALL
9878InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
9879    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9880    bool skipCall = false;
9881    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9882
9883    std::unique_lock<std::mutex> lock(global_lock);
9884    skipCall |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
9885    lock.unlock();
9886    if (!skipCall) {
9887        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
9888    }
9889    return result;
9890}
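// Note the asymmetry with the Flush path: Invalidate only checks that the given
// ranges lie within a current mapping. Device-side writes are not copied back into
// the non-coherent shadow allocation here, so reads through the shadow buffer may
// not observe them.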
9891
9892VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
9893    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9894    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9895    bool skipCall = false;
9896    std::unique_lock<std::mutex> lock(global_lock);
9897    auto image_node = getImageNode(dev_data, image);
9898    if (image_node) {
9899        // Track objects tied to memory
9900        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
9901        skipCall = set_mem_binding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
9902        VkMemoryRequirements memRequirements;
9903        lock.unlock();
9904        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
9905        lock.lock();
9906
9907        // Track and validate bound memory range information
9908        auto mem_info = getMemObjInfo(dev_data, mem);
9909        if (mem_info) {
9910            const MEMORY_RANGE range =
9911                insert_memory_ranges(image_handle, mem, memoryOffset, memRequirements, mem_info->imageRanges);
9912            skipCall |= validate_memory_range(dev_data, mem_info->bufferRanges, range, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
9913            skipCall |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
9914        }
9915
9916        print_mem_list(dev_data);
9917        lock.unlock();
9918        if (!skipCall) {
9919            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
9920            lock.lock();
9921            dev_data->memObjMap[mem].get()->image = image;
9922            image_node->mem = mem;
9923            image_node->memOffset = memoryOffset;
9924            image_node->memSize = memRequirements.size;
9925            lock.unlock();
9926        }
9927    } else {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ", has it already been destroyed?",
                reinterpret_cast<const uint64_t &>(image));
9932    }
9933    return result;
9934}
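// The unlock/relock around GetImageMemoryRequirements above appears deliberate:
// down-chain calls are not made while global_lock is held, presumably so a reentrant
// debug callback cannot self-deadlock. The cost is that state read after re-locking
// may have been mutated by another thread, which is why only per-image fields are
// written afterwards.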
9935
9936VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
9937    bool skip_call = false;
9938    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9939    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9940    std::unique_lock<std::mutex> lock(global_lock);
9941    auto event_node = dev_data->eventMap.find(event);
9942    if (event_node != dev_data->eventMap.end()) {
9943        event_node->second.needsSignaled = false;
9944        event_node->second.stageMask = VK_PIPELINE_STAGE_HOST_BIT;
9945        if (event_node->second.write_in_use) {
9946            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
9947                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
9948                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
9949                                 reinterpret_cast<const uint64_t &>(event));
9950        }
9951    }
9952    lock.unlock();
9953    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
9954    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
9955    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
9956    for (auto queue_data : dev_data->queueMap) {
9957        auto event_entry = queue_data.second.eventToStageMap.find(event);
9958        if (event_entry != queue_data.second.eventToStageMap.end()) {
9959            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
9960        }
9961    }
9962    if (!skip_call)
9963        result = dev_data->device_dispatch_table->SetEvent(device, event);
9964    return result;
9965}
9966
9967VKAPI_ATTR VkResult VKAPI_CALL
9968QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
9969    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
9970    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
9971    bool skip_call = false;
9972    std::unique_lock<std::mutex> lock(global_lock);
9973    auto pFence = getFenceNode(dev_data, fence);
9974    auto pQueue = getQueueNode(dev_data, queue);
9975
9976    // First verify that fence is not in use
9977    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
9978
9979    if (fence != VK_NULL_HANDLE) {
9980        SubmitFence(pQueue, pFence);
9981    }
9982
9983    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
9984        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
9985        // Track objects tied to memory
9986        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
9987            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
9988                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
9989                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
9990                                           "vkQueueBindSparse"))
9991                    skip_call = true;
9992            }
9993        }
9994        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
9995            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
9996                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
9997                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
9998                                           "vkQueueBindSparse"))
9999                    skip_call = true;
10000            }
10001        }
10002        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
10003            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
10004                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
10005                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10006                                           "vkQueueBindSparse"))
10007                    skip_call = true;
10008            }
10009        }
10010        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
10011            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
10012            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10013            if (pSemaphore) {
10014                if (pSemaphore->signaled) {
10015                    pSemaphore->signaled = false;
10016                } else {
10017                    skip_call |=
10018                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10019                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10020                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
10021                                " that has no way to be signaled.",
10022                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10023                }
10024            }
10025        }
10026        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
10027            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
10028            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10029            if (pSemaphore) {
10030                if (pSemaphore->signaled) {
                    skip_call |=
10032                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10033                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10034                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
10035                                ", but that semaphore is already signaled.",
10036                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
10037                }
10038                pSemaphore->signaled = true;
10039            }
10040        }
10041    }
10042    print_mem_list(dev_data);
10043    lock.unlock();
10044
10045    if (!skip_call)
10046        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
10047
10048    return result;
10049}
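// Binary-semaphore bookkeeping used above, in sketch form: a wait consumes a pending
// signal (signaled -> false) and a queued signal sets it (signaled -> true). Waiting
// on a semaphore with no pending signal, or signaling one that is already signaled,
// is reported as a forward-progress hazard. A single bool suffices at this API
// revision, which predates timeline semaphores.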
10050
10051VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
10052                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
10053    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10054    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
10055    if (result == VK_SUCCESS) {
10056        std::lock_guard<std::mutex> lock(global_lock);
10057        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
10058        sNode->signaled = false;
10059        sNode->queue = VK_NULL_HANDLE;
10060        sNode->in_use.store(0);
10061    }
10062    return result;
10063}
10064
10065VKAPI_ATTR VkResult VKAPI_CALL
10066CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
10067    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10068    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
10069    if (result == VK_SUCCESS) {
10070        std::lock_guard<std::mutex> lock(global_lock);
10071        dev_data->eventMap[*pEvent].needsSignaled = false;
10072        dev_data->eventMap[*pEvent].in_use.store(0);
10073        dev_data->eventMap[*pEvent].write_in_use = 0;
10074        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
10075    }
10076    return result;
10077}
10078
10079VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
10080                                                  const VkAllocationCallbacks *pAllocator,
10081                                                  VkSwapchainKHR *pSwapchain) {
10082    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10083    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
10084
10085    if (VK_SUCCESS == result) {
10086        std::lock_guard<std::mutex> lock(global_lock);
10087        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
10088    }
10089
10090    return result;
10091}
10092
10093VKAPI_ATTR void VKAPI_CALL
10094DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
10095    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10096    bool skipCall = false;
10097
10098    std::unique_lock<std::mutex> lock(global_lock);
10099    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
10100    if (swapchain_data) {
10101        if (swapchain_data->images.size() > 0) {
10102            for (auto swapchain_image : swapchain_data->images) {
10103                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
10104                if (image_sub != dev_data->imageSubresourceMap.end()) {
10105                    for (auto imgsubpair : image_sub->second) {
10106                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
10107                        if (image_item != dev_data->imageLayoutMap.end()) {
10108                            dev_data->imageLayoutMap.erase(image_item);
10109                        }
10110                    }
10111                    dev_data->imageSubresourceMap.erase(image_sub);
10112                }
                skipCall |= clear_object_binding(dev_data, (uint64_t)swapchain_image,
10114                                                VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
10115                dev_data->imageMap.erase(swapchain_image);
10116            }
10117        }
10118        dev_data->device_extensions.swapchainMap.erase(swapchain);
10119    }
10120    lock.unlock();
10121    if (!skipCall)
10122        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
10123}
10124
10125VKAPI_ATTR VkResult VKAPI_CALL
10126GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
10127    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10128    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
10129
10130    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
10131        // This should never happen and is checked by param checker.
10132        if (!pCount)
10133            return result;
10134        std::lock_guard<std::mutex> lock(global_lock);
10135        const size_t count = *pCount;
10136        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
10137        if (swapchain_node && !swapchain_node->images.empty()) {
10138            // TODO : Not sure I like the memcmp here, but it works
10139            const bool mismatch = (swapchain_node->images.size() != count ||
10140                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
10141            if (mismatch) {
10142                // TODO: Verify against Valid Usage section of extension
10143                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
10144                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
                        "vkGetSwapchainImagesKHR(0x%" PRIx64
                        ") returned mismatching data from a previous query on the same swapchain",
10147                        (uint64_t)(swapchain));
10148            }
10149        }
        // swapchain_node may be null if the handle is unknown to this layer; guard
        // before dereferencing its create info below
        if (!swapchain_node) {
            return result;
        }
        for (uint32_t i = 0; i < *pCount; ++i) {
10151            IMAGE_LAYOUT_NODE image_layout_node;
10152            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
10153            image_layout_node.format = swapchain_node->createInfo.imageFormat;
10154            // Add imageMap entries for each swapchain image
10155            VkImageCreateInfo image_ci = {};
10156            image_ci.mipLevels = 1;
10157            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
10158            image_ci.usage = swapchain_node->createInfo.imageUsage;
10159            image_ci.format = swapchain_node->createInfo.imageFormat;
10160            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
10161            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
            image_ci.extent.depth = 1; // swapchain images are 2D
10163            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
10164            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(&image_ci));
10165            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
10166            image_node->valid = false;
10167            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
10168            swapchain_node->images.push_back(pSwapchainImages[i]);
10169            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
10170            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
10171            dev_data->imageLayoutMap[subpair] = image_layout_node;
10172            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
10173        }
10174    }
10175    return result;
10176}
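// The usual two-call idiom this wrapper supports (illustrative only, not layer code):
//
//     uint32_t count = 0;
//     vkGetSwapchainImagesKHR(device, swapchain, &count, NULL);          // query count
//     std::vector<VkImage> images(count);
//     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data()); // fetch handles
//
// On the second call the layer seeds an IMAGE_NODE per returned handle so swapchain
// images participate in layout/memory validation; mem is set to the sentinel
// MEMTRACKER_SWAP_CHAIN_IMAGE_KEY because they have no app-visible allocation.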
10177
10178VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
10179    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
10180    bool skip_call = false;
10181
10182    std::lock_guard<std::mutex> lock(global_lock);
10183    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10184        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10185        if (pSemaphore && !pSemaphore->signaled) {
10186            skip_call |=
10187                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t &>(queue), __LINE__,
                            DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10189                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
10190                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
10191        }
10192    }
10193    VkDeviceMemory mem;
10194    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
10195        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
10196        if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
10197            VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
10198#if MTMERGESOURCE
10199            skip_call |= getImageMemory(dev_data, image, &mem);
10200            skip_call |= validate_memory_is_valid(dev_data, mem, "vkQueuePresentKHR()", image);
10201#endif
10202            vector<VkImageLayout> layouts;
10203            if (FindLayouts(dev_data, image, layouts)) {
10204                for (auto layout : layouts) {
10205                    if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
10206                        skip_call |=
10207                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
10208                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                        "Images passed to present must be in layout "
                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but are in %s",
10211                                        string_VkImageLayout(layout));
10212                    }
10213                }
10214            }
10215        }
10216    }
10217
10218    if (skip_call) {
10219        return VK_ERROR_VALIDATION_FAILED_EXT;
10220    }
10221
10222    VkResult result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
10223
10224    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
        // If the call reached the ICD, the wait semaphores were consumed even when
        // presentation itself failed, so mark them unsignaled. (TODO: confirm vs. spec)
10227        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
10228            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
10229            if (pSemaphore && pSemaphore->signaled) {
10230                pSemaphore->signaled = false;
10231            }
10232        }
10233    }
10234
10235    return result;
10236}
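// A minimal sketch of the transition the layout check above expects the app to have
// recorded before presenting (assumptions: color-only image, single queue family,
// swapchainImage/cb are hypothetical app-side handles):
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = swapchainImage;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, NULL, 0, NULL, 1, &barrier);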
10237
10238VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
10239                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
10240    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10241    bool skipCall = false;
10242
10243    std::unique_lock<std::mutex> lock(global_lock);
10244    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
10245    if (pSemaphore && pSemaphore->signaled) {
10246        skipCall |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
10247                           reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10248                           "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
10249    }
10250
10251    auto pFence = getFenceNode(dev_data, fence);
10252    if (pFence) {
10253        skipCall |= ValidateFenceForSubmit(dev_data, pFence);
10254    }
10255    lock.unlock();
10256
10257    if (skipCall)
10258        return VK_ERROR_VALIDATION_FAILED_EXT;
10259
10260    VkResult result =
10261            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
10262
10263    lock.lock();
10264    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
10265        if (pFence) {
10266            pFence->state = FENCE_INFLIGHT;
10267        }
10268
10269        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
10270        if (pSemaphore) {
10271            pSemaphore->signaled = true;
10272        }
10273    }
10274    lock.unlock();
10275
10276    return result;
10277}
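// A successful (or VK_SUBOPTIMAL_KHR) acquire is modeled like a queue signal: the
// semaphore flips to signaled and any provided fence becomes FENCE_INFLIGHT, mirroring
// how QueueSubmit treats its fence. VK_SUBOPTIMAL_KHR still delivers a usable image,
// hence it is treated as success for state-tracking purposes.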
10278
10279VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
10280                                                        VkPhysicalDevice *pPhysicalDevices) {
10281    bool skipCall = false;
10282    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10283    if (my_data->instance_state) {
10284        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
10285        if (NULL == pPhysicalDevices) {
10286            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
10287        } else {
10288            if (UNCALLED == my_data->instance_state->vkEnumeratePhysicalDevicesState) {
10289                // Flag warning here. You can call this without having queried the count, but it may not be
10290                // robust on platforms with multiple physical devices.
10291                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
10292                                    0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
10293                                    "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
10294                                    "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
10295            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
10296            else if (my_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) {
10297                // Having actual count match count from app is not a requirement, so this can be a warning
10298                skipCall |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10299                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
10300                                    "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
10301                                    "supported by this instance is %u.",
10302                                    *pPhysicalDeviceCount, my_data->instance_state->physical_devices_count);
10303            }
10304            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
10305        }
10306        if (skipCall) {
10307            return VK_ERROR_VALIDATION_FAILED_EXT;
10308        }
10309        VkResult result =
10310            my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
10311        if (NULL == pPhysicalDevices) {
10312            my_data->instance_state->physical_devices_count = *pPhysicalDeviceCount;
10313        } else { // Save physical devices
10314            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
10315                layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map);
10316                phy_dev_data->physical_device_state = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE());
10317                // Init actual features for each physical device
10318                my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i],
10319                                                                            &phy_dev_data->physical_device_features);
10320            }
10321        }
10322        return result;
10323    } else {
10324        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
10325                DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
10326                (uint64_t)instance);
10327    }
10328    return VK_ERROR_VALIDATION_FAILED_EXT;
10329}
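// State machine behind the enumeration warnings above and below (sketch):
//
//     UNCALLED --(array ptr == NULL)--> QUERY_COUNT --(array ptr non-NULL)--> QUERY_DETAILS
//
// Jumping straight from UNCALLED to a non-NULL array is legal Vulkan, but is flagged
// because the app has no portable way to size the array without the count query first.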
10330
10331VKAPI_ATTR void VKAPI_CALL
10332GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
10333    VkQueueFamilyProperties *pQueueFamilyProperties) {
10334    bool skip_call = false;
10335    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
10336    if (phy_dev_data->physical_device_state) {
10337        if (NULL == pQueueFamilyProperties) {
10338            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        } else {
10341            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
10342            // get count
10343            if (UNCALLED == phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
10344                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10345                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
10346                    "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
10347                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
10348                    "NULL pQueueFamilyProperties to query pCount.");
10349            }
10350            // Then verify that pCount that is passed in on second call matches what was returned
10351            if (phy_dev_data->physical_device_state->queueFamilyPropertiesCount != *pCount) {
10353                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
10354                // provide as warning
10355                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
10356                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
10357                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
10358                    "supported by this physicalDevice is %u.",
10359                    *pCount, phy_dev_data->physical_device_state->queueFamilyPropertiesCount);
10360            }
10361            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
10362        }
10363        if (skip_call) {
10364            return;
10365        }
10366        phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount,
10367            pQueueFamilyProperties);
10368        if (NULL == pQueueFamilyProperties) {
10369            phy_dev_data->physical_device_state->queueFamilyPropertiesCount = *pCount;
        } else { // Save queue family properties
10372            phy_dev_data->queue_family_properties.reserve(*pCount);
10373            for (uint32_t i = 0; i < *pCount; i++) {
10374                phy_dev_data->queue_family_properties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i]));
10375            }
10376        }
10377        return;
    } else {
10380        log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
10381            __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
10382            "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
10383            (uint64_t)physicalDevice);
10384    }
10385}
10386
10387VKAPI_ATTR VkResult VKAPI_CALL
10388CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10389                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10390    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10391    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10392    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10393    if (VK_SUCCESS == res) {
10394        std::lock_guard<std::mutex> lock(global_lock);
10395        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
10396    }
10397    return res;
10398}
10399
10400VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
10401                                                         VkDebugReportCallbackEXT msgCallback,
10402                                                         const VkAllocationCallbacks *pAllocator) {
10403    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10404    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10405    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10406    std::lock_guard<std::mutex> lock(global_lock);
10407    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
10408}
10409
10410VKAPI_ATTR void VKAPI_CALL
10411DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10412                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10413    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10414    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
10415                                                            pMsg);
10416}
10417
10418VKAPI_ATTR VkResult VKAPI_CALL
10419EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10420    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10421}
10422
10423VKAPI_ATTR VkResult VKAPI_CALL
10424EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
10425    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
10426}
10427
10428VKAPI_ATTR VkResult VKAPI_CALL
10429EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
10430    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10431        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
10432
10433    return VK_ERROR_LAYER_NOT_PRESENT;
10434}
10435
10436VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10437                                                                  const char *pLayerName, uint32_t *pCount,
10438                                                                  VkExtensionProperties *pProperties) {
10439    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
10440        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
10441
10442    assert(physicalDevice);
10443
10444    dispatch_key key = get_dispatch_key(physicalDevice);
10445    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
10446    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
10447}
10448
10449static PFN_vkVoidFunction
10450intercept_core_instance_command(const char *name);
10451
10452static PFN_vkVoidFunction
10453intercept_core_device_command(const char *name);
10454
10455static PFN_vkVoidFunction
10456intercept_khr_swapchain_command(const char *name, VkDevice dev);
10457
10458VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
10459    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
10460    if (proc)
10461        return proc;
10462
10463    assert(dev);
10464
10465    proc = intercept_khr_swapchain_command(funcName, dev);
10466    if (proc)
10467        return proc;
10468
10469    layer_data *dev_data;
10470    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10471
10472    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
    if (pTable->GetDeviceProcAddr == NULL)
        return NULL;
    return pTable->GetDeviceProcAddr(dev, funcName);
10478}
10479
10480VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
10481    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
10482    if (!proc)
10483        proc = intercept_core_device_command(funcName);
10484    if (!proc)
10485        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
10486    if (proc)
10487        return proc;
10488
10489    assert(instance);
10490
10491    layer_data *my_data;
10492    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
10493    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
10494    if (proc)
10495        return proc;
10496
10497    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
10498    if (pTable->GetInstanceProcAddr == NULL)
10499        return NULL;
10500    return pTable->GetInstanceProcAddr(instance, funcName);
10501}
10502
10503static PFN_vkVoidFunction
10504intercept_core_instance_command(const char *name) {
10505    static const struct {
10506        const char *name;
10507        PFN_vkVoidFunction proc;
10508    } core_instance_commands[] = {
10509        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
10510        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
10511        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
10512        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
10513        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
10514        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
10515        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
10516        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
10517        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
10518        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
10519        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
10520    };
10521
10522    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
10523        if (!strcmp(core_instance_commands[i].name, name))
10524            return core_instance_commands[i].proc;
10525    }
10526
10527    return nullptr;
10528}
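// Dispatch here is a linear scan of a static name -> PFN table. With on the order of
// a hundred entries this is cheap, and proc addresses are typically queried once at
// initialization and cached by the loader and the app rather than looked up per call.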
10529
10530static PFN_vkVoidFunction
10531intercept_core_device_command(const char *name) {
10532    static const struct {
10533        const char *name;
10534        PFN_vkVoidFunction proc;
10535    } core_device_commands[] = {
10536        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
10537        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
10538        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
10539        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
10540        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
10541        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
10542        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
10543        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
10544        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
10545        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
10546        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
10547        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
10548        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
10549        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
10550        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
10551        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
10552        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
10553        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
10554        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
10555        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
10556        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
10557        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
10558        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
10559        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
10560        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
10561        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
10562        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
10563        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
10564        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
10565        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
10566        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
10567        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
10568        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
10569        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
10570        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
10571        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
10572        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
10573        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
10574        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
10575        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
10576        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
10577        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
10578        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
10579        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
10580        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
10581        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
10582        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
10583        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
10584        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
10585        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
10586        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
10587        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
10588        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
10589        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
10590        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
10591        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
10592        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
10593        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
10594        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
10595        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
10596        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
10597        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
10598        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
10599        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
10600        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
10601        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
10602        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
10603        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
10604        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
10605        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
10606        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
10607        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
10608        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
10609        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
10610        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
10611        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
10612        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
10613        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
10614        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
10615        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
10616        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
10617        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
10618        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
10619        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
10620        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
10621        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
10622        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
10623        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
10624        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
10625        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
10626        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
10627        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
10628        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
10629        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
10630        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
10631        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
10632        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
10633        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
10634        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
10635        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
10636        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
10637        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
10638        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
10639        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
10640        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
10641        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
10642        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
10643        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
10644        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
10645        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
10646        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
10647        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
10648        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
10649        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
10650        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
10651        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
10652    };
10653
10654    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
10655        if (!strcmp(core_device_commands[i].name, name))
10656            return core_device_commands[i].proc;
10657    }
10658
10659    return nullptr;
10660}
10661
10662static PFN_vkVoidFunction
10663intercept_khr_swapchain_command(const char *name, VkDevice dev) {
10664    static const struct {
10665        const char *name;
10666        PFN_vkVoidFunction proc;
10667    } khr_swapchain_commands[] = {
10668        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
10669        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
10670        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
10671        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
10672        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
10673    };
10674
10675    if (dev) {
10676        layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
10677        if (!dev_data->device_extensions.wsi_enabled)
10678            return nullptr;
10679    }
10680
10681    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
10682        if (!strcmp(khr_swapchain_commands[i].name, name))
10683            return khr_swapchain_commands[i].proc;
10684    }
10685
10686    return nullptr;
10687}
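// Swapchain entry points are only handed out once the device was created with
// VK_KHR_swapchain enabled (wsi_enabled). On the instance path (dev == VK_NULL_HANDLE,
// see GetInstanceProcAddr above) the table is consulted unconditionally, since no
// device state exists yet to check against.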
10688
10689} // namespace core_validation
10690
10691// vk_layer_logging.h expects these to be defined
10692
10693VKAPI_ATTR VkResult VKAPI_CALL
10694vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
10695                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
10696    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
10697}
10698
10699VKAPI_ATTR void VKAPI_CALL
10700vkDestroyDebugReportCallbackEXT(VkInstance instance,
10701                                VkDebugReportCallbackEXT msgCallback,
10702                                const VkAllocationCallbacks *pAllocator) {
10703    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
10704}
10705
10706VKAPI_ATTR void VKAPI_CALL
10707vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
10708                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
10709    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
10710}
10711
10712// loader-layer interface v0, just wrappers since there is only a layer
10713
10714VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10715vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
10716    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
10717}
10718
10719VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10720vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
10721    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
10722}
10723
10724VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
10725vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
10726    // the layer command handles VK_NULL_HANDLE just fine internally
10727    assert(physicalDevice == VK_NULL_HANDLE);
10728    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
10729}
10730
10731VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
10732                                                                                    const char *pLayerName, uint32_t *pCount,
10733                                                                                    VkExtensionProperties *pProperties) {
10734    // the layer command handles VK_NULL_HANDLE just fine internally
10735    assert(physicalDevice == VK_NULL_HANDLE);
10736    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
10737}
10738
10739VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
10740    return core_validation::GetDeviceProcAddr(dev, funcName);
10741}
10742
10743VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
10744    return core_validation::GetInstanceProcAddr(instance, funcName);
10745}
10746