core_validation.cpp revision 888cae09036ec622d6014e18efbda55e6226cf22
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    do {                                                                                                                           \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    } while (0)
#endif
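
// Illustrative usage (see ValidateLayerOrdering below), for messages emitted
// before any debug-report callback exists:
//     LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);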

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
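// Note (illustrative): code below tests these sentinels directly, e.g.
//     if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) { /* WSI image */ }
// to route swapchain-backed images around the normal bound-memory checks.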

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
    VkInstance instance;
    unique_ptr<INSTANCE_STATE> instance_state;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;

    devExts device_extensions;
    unordered_set<VkQueue> queues;  // All queues under given device
    // Vector indices correspond to queueFamilyIndex
    vector<unique_ptr<VkQueueFamilyProperties>> queue_family_properties;
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device;

    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;
    VkPhysicalDeviceFeatures physical_device_features;
    unique_ptr<PHYSICAL_DEVICE_STATE> physical_device_state;

    layer_data()
        : instance_state(nullptr), report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
          device_extensions(), device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{}, physical_device_features{},
          physical_device_state(nullptr) {}
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
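
// Illustrative sketch: because the iterator advances by each instruction's word
// count, a whole module can be walked with range-based for (as build_def_index
// does below), e.g.:
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpEntryPoint) { /* inspect insn.word(n) */ }
//     }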

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
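
// Illustrative sketch: begin() skips the 5-word SPIR-V module header, and a
// caller resolves an <id> to its defining instruction via get_def(), checking
// against end() for ids with no recorded definition, e.g.:
//     auto insn = module->get_def(id);
//     if (insn != module->end() && insn.opcode() == spv::OpConstant) { /* ... */ }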

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;
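// Entry points take this lock around accesses to the shared maps; a typical
// pattern (illustrative sketch, not a call site in this excerpt) is:
//     std::unique_lock<std::mutex> lock(global_lock);
//     /* read or update layer maps */
//     lock.unlock();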

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else VK_NULL_HANDLE
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified buffer view or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_NODE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}
// Return ptr to bound memory for given handle of specified type and set sparse param to indicate if binding is sparse
static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type, bool *sparse) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        if (img_node) {
            *sparse = img_node->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
            return &img_node->mem;
        }
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node) {
            *sparse = buff_node->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
            return &buff_node->mem;
        }
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// Overloaded version of above function that doesn't care about sparse bool
static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool sparse;
    return GetObjectMemBinding(my_data, handle, type, &sparse);
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
                                 uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                            MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                                                                " used by %s. In this case, %s should have %s set during creation.",
                            ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skip_call;
}
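
// Illustrative sketch of the two modes: with actual = TRANSFER_SRC | SAMPLED,
//     strict == true  requires every desired bit (desired = TRANSFER_SRC | STORAGE fails),
//     strict == false requires any desired bit   (desired = TRANSFER_SRC | STORAGE passes).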

// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
                                    char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                "buffer", func_name, usage_string);
}
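
// A hypothetical call site (not from this excerpt), checking that a copy source
// was created with transfer-src usage:
//     ValidateBufferUsageFlags(dev_data, buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
//                              "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");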

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_node
//  If mem is special swapchain key, then verify that image_node valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_NODE *image_node, const char *functionName) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_node->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_node to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_NODE *image_node, bool valid) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_node->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
    SetMemoryValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_NODE *sampler_node) {
    sampler_node->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(sampler_node->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node) {
    // Skip validation if this image was created through WSI
    if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(img_node->mem);
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        img_node->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_node = getImageNode(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_node) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_node);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
    if (pMemInfo) {
        pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->mem);
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    }
    // Now update cb binding for buffer
    buff_node->cb_bindings.insert(cb_node);
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->command_buffer_bindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// For given MemObjInfo, report Obj & CB bindings. Clear any object bindings.
static bool ReportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
    bool skip_call = false;
    size_t cmdBufRefCount = pMemObjInfo->command_buffer_bindings.size();
    size_t objRefCount = pMemObjInfo->obj_bindings.size();

    if ((cmdBufRefCount + objRefCount) != 0) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                            "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
                            " references",
                            (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
    }

    if (cmdBufRefCount > 0) {
        for (auto cb : pMemObjInfo->command_buffer_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
        }
        // Clear the list of hanging references
        pMemObjInfo->command_buffer_bindings.clear();
    }

    if (objRefCount > 0) {
        for (auto obj : pMemObjInfo->obj_bindings) {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
                    obj.handle, (uint64_t)pMemObjInfo->mem);
            // Clear mem binding for bound objects
            switch (obj.type) {
            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
                auto image_node = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
                assert(image_node); // Any destroyed images should already be removed from bindings
                image_node->mem = MEMORY_UNBOUND;
                break;
            }
            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
                auto buff_node = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
                assert(buff_node); // Any destroyed buffers should already be removed from bindings
                buff_node->mem = MEMORY_UNBOUND;
                break;
            }
            default:
                // Should only have buffer or image objects bound to memory
                assert(0);
            }
        }
        // Clear the list of hanging references
        pMemObjInfo->obj_bindings.clear();
    }
    return skip_call;
}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skip_call = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        // TODO: Verify against Valid Use section
        // Clear any CB bindings for completed CBs
        //   TODO : Is there a better place to do this?

        assert(pInfo->object != VK_NULL_HANDLE);
        // clear_cmd_buf_and_mem_references removes elements from
        // pInfo->command_buffer_bindings -- this copy not needed in c++14,
        // and probably not needed in practice in c++11
        auto bindings = pInfo->command_buffer_bindings;
        for (auto cb : bindings) {
            if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                clear_cmd_buf_and_mem_references(dev_data, cb);
            }
        }
        // Now check for any remaining references to this mem obj and remove bindings
        if (pInfo->command_buffer_bindings.size() || pInfo->obj_bindings.size()) {
            skip_call |= ReportMemReferencesAndCleanUp(dev_data, pInfo);
        }
        // Delete mem obj info
        dev_data->memObjMap.erase(dev_data->memObjMap.find(mem));
    } else if (VK_NULL_HANDLE != mem) {
        // The request is to free an invalid, non-zero handle
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                            "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map",
                            reinterpret_cast<uint64_t &>(mem));
    }
    return skip_call;
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skip_call = false;
    VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->obj_bindings.erase({handle, type})) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skip_call;
}

// For given mem object, verify that it is not null or UNBOUND; if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory().",
                         api_name, type_name, handle, type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation.",
                         api_name, type_name, handle);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_NODE *image_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<const uint64_t &>(image_node->image),
                                          api_name, "Image");
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<const uint64_t &>(buffer_node->buffer),
                                          api_name, "Buffer");
    }
    return result;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        bool sparse = false;
        VkDeviceMemory *mem_binding = GetObjectMemBinding(dev_data, handle, type, &sparse);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, *mem_binding);
            if (prev_binding) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if ((*mem_binding == MEMORY_UNBOUND) && (!sparse)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO : More memory state transition stuff.
                        }
                    }
                }
                *mem_binding = mem;
            }
        }
    }
    return skip_call;
}

// For NULL mem case, clear any previous binding. Else:
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Returns skip_call: true if an error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skip_call;
}

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(dev_data, VkImage(handle));
        if (img_node)
            *mem = img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(dev_data, VkBuffer(handle));
        if (buff_node)
            *mem = buff_node->mem;
        break;
    }
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                                   " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
        auto mem_info = (*ii).second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->command_buffer_bindings.size() + mem_info->obj_bindings.size());
        if (0 != mem_info->alloc_info.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->obj_bindings.size());
        if (mem_info->obj_bindings.size() > 0) {
            for (auto obj : mem_info->obj_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->command_buffer_bindings.size());
        if (mem_info->command_buffer_bindings.size() > 0) {
            for (auto cb : mem_info->command_buffer_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        /* Types */
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        /* Fixed constants */
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Specialization constants */
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Variables */
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        /* Functions */
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            /* We don't care about any other defs for now. */
            break;
        }
    }
}

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
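            // Relies on VkShaderStageFlagBits bit positions lining up with the
            // SPIR-V ExecutionModel values (Vertex=0 ... GLCompute=5), so the
            // execution model in word(1) maps directly to a stage flag bit.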
1162            auto entrypointStageBits = 1u << insn.word(1);
1163
1164            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1165                return insn;
1166            }
1167        }
1168    }
1169
1170    return src->end();
1171}
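/* Example: for a module containing
 *     OpEntryPoint Fragment %main "main" %in_color %out_color
 * the execution model word is spv::ExecutionModelFragment (4), so
 * 1u << 4 == 0x10 == VK_SHADER_STAGE_FRAGMENT_BIT and
 * find_entrypoint(src, "main", VK_SHADER_STAGE_FRAGMENT_BIT) returns that
 * instruction -- the VkShaderStageFlagBits bit positions line up with the
 * SPIR-V execution model values for the graphics stages and compute. */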
1172
1173static char const *storage_class_name(unsigned sc) {
1174    switch (sc) {
1175    case spv::StorageClassInput:
1176        return "input";
1177    case spv::StorageClassOutput:
1178        return "output";
1179    case spv::StorageClassUniformConstant:
1180        return "const uniform";
1181    case spv::StorageClassUniform:
1182        return "uniform";
1183    case spv::StorageClassWorkgroup:
1184        return "workgroup local";
1185    case spv::StorageClassCrossWorkgroup:
1186        return "workgroup global";
1187    case spv::StorageClassPrivate:
1188        return "private global";
1189    case spv::StorageClassFunction:
1190        return "function";
1191    case spv::StorageClassGeneric:
1192        return "generic";
1193    case spv::StorageClassAtomicCounter:
1194        return "atomic counter";
1195    case spv::StorageClassImage:
1196        return "image";
1197    case spv::StorageClassPushConstant:
1198        return "push constant";
1199    default:
1200        return "unknown";
1201    }
1202}
1203
1204/* get the value of an integral constant */
1205unsigned get_constant_value(shader_module const *src, unsigned id) {
1206    auto value = src->get_def(id);
1207    assert(value != src->end());
1208
1209    if (value.opcode() != spv::OpConstant) {
1210        /* TODO: Either ensure that the specialization transform has already been
1211         * performed on any module we consider here, OR specialize on the fly now.
1212         */
1213        return 1;
1214    }
1215
1216    return value.word(3);
1217}
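/* Example: for "%len = OpConstant %uint 16", word(1) is the result type id,
 * word(2) the result id, and word(3) the literal 16 -- exactly what the
 * OpTypeArray length operands handled below resolve to. Constants wider than
 * 32 bits carry additional literal words; only the low-order word is returned
 * here. */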
1218
1219
1220static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1221    auto insn = src->get_def(type);
1222    assert(insn != src->end());
1223
1224    switch (insn.opcode()) {
1225    case spv::OpTypeBool:
1226        ss << "bool";
1227        break;
1228    case spv::OpTypeInt:
1229        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1230        break;
1231    case spv::OpTypeFloat:
1232        ss << "float" << insn.word(2);
1233        break;
1234    case spv::OpTypeVector:
1235        ss << "vec" << insn.word(3) << " of ";
1236        describe_type_inner(ss, src, insn.word(2));
1237        break;
1238    case spv::OpTypeMatrix:
1239        ss << "mat" << insn.word(3) << " of ";
1240        describe_type_inner(ss, src, insn.word(2));
1241        break;
1242    case spv::OpTypeArray:
1243        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1244        describe_type_inner(ss, src, insn.word(2));
1245        break;
1246    case spv::OpTypePointer:
1247        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1248        describe_type_inner(ss, src, insn.word(3));
1249        break;
1250    case spv::OpTypeStruct: {
1251        ss << "struct of (";
1252        for (unsigned i = 2; i < insn.len(); i++) {
1253            if (i > 2) {
1254                ss << ", ";
1255            }
1256            describe_type_inner(ss, src, insn.word(i));
1257        }
1258        /* emit the closing paren even for a zero-member struct */
1259        ss << ")";
1260        break;
1261    }
1262    case spv::OpTypeSampler:
1263        ss << "sampler";
1264        break;
1265    case spv::OpTypeSampledImage:
1266        ss << "sampler+";
1267        describe_type_inner(ss, src, insn.word(2));
1268        break;
1269    case spv::OpTypeImage:
1270        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1271        break;
1272    default:
1273        ss << "oddtype";
1274        break;
1275    }
1276}
1277
1278
1279static std::string describe_type(shader_module const *src, unsigned type) {
1280    std::ostringstream ss;
1281    describe_type_inner(ss, src, type);
1282    return ss.str();
1283}
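/* Example output (illustrative): a fragment color output declared through
 * %ptr = OpTypePointer Output %v4float renders as
 * "ptr to output vec4 of float32". These strings feed only the interface
 * mismatch diagnostics below. */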
1284
1285
1286static bool is_narrow_numeric_type(spirv_inst_iter type) {
1287    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat) {
1288        return false;
1289    }
1290    return type.word(2) < 64;
1291}
1292
1293
1294static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1295    /* walk two type trees together, and complain about differences */
1296    auto a_insn = a->get_def(a_type);
1297    auto b_insn = b->get_def(b_type);
1298    assert(a_insn != a->end());
1299    assert(b_insn != b->end());
1300
1301    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1302        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1303    }
1304
1305    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1306        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1307        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1308    }
1309
1310    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1311        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1312    }
1313
1314    if (a_insn.opcode() != b_insn.opcode()) {
1315        return false;
1316    }
1317
1318    if (a_insn.opcode() == spv::OpTypePointer) {
1319        /* match on pointee type. storage class is expected to differ */
1320        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1321    }
1322
1323    if (a_arrayed || b_arrayed) {
1324        /* if we haven't resolved array-of-verts by here, we're not going to. */
1325        return false;
1326    }
1327
1328    switch (a_insn.opcode()) {
1329    case spv::OpTypeBool:
1330        return true;
1331    case spv::OpTypeInt:
1332        /* match on width, signedness */
1333        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1334    case spv::OpTypeFloat:
1335        /* match on width */
1336        return a_insn.word(2) == b_insn.word(2);
1337    case spv::OpTypeVector:
1338        /* match on element type, count. */
1339        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1340            return false;
1341        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1342            return a_insn.word(3) >= b_insn.word(3);
1343        } else {
1344            return a_insn.word(3) == b_insn.word(3);
1345        }
1346
1347    case spv::OpTypeMatrix:
1348        /* match on element type, count. */
1349        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1350    case spv::OpTypeArray:
1351        /* match on element type, count. these all have the same layout. we don't get here if
1352         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1353         * not a literal within OpTypeArray */
1354        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1355               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1356    case spv::OpTypeStruct:
1357        /* match on all element types */
1358        {
1359            if (a_insn.len() != b_insn.len()) {
1360                return false; /* structs cannot match if member counts differ */
1361            }
1362
1363            for (unsigned i = 2; i < a_insn.len(); i++) {
1364                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1365                    return false;
1366                }
1367            }
1368
1369            return true;
1370        }
1371    default:
1372        /* remaining types are CLisms, or may not appear in the interfaces we
1373         * are interested in. Just claim no match.
1374         */
1375        return false;
1376    }
1377}
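/* Example of the relaxed rule: with relaxed == true, a producer writing
 * vec4 of float32 matches a consumer reading vec3 of float32 (component count
 * a >= b) or a lone float32 (vector vs. narrow scalar), but never the reverse.
 * The flag survives pointer and array unwrapping, yet is passed as false when
 * recursing into element types. */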
1378
1379static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1380    auto it = map.find(id);
1381    if (it == map.end())
1382        return def;
1383    else
1384        return it->second;
1385}
1386
1387static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1388    auto insn = src->get_def(type);
1389    assert(insn != src->end());
1390
1391    switch (insn.opcode()) {
1392    case spv::OpTypePointer:
1393        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1394         * we're never actually passing pointers around. */
1395        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1396    case spv::OpTypeArray:
1397        if (strip_array_level) {
1398            return get_locations_consumed_by_type(src, insn.word(2), false);
1399        } else {
1400            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1401        }
1402    case spv::OpTypeMatrix:
1403        /* num locations is the dimension * element size */
1404        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1405    case spv::OpTypeVector: {
1406        auto scalar_type = src->get_def(insn.word(2));
1407        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1408            scalar_type.word(2) : 32;
1409
1410        /* locations are 128-bit wide; 3- and 4-component vectors of 64-bit
1411         * types require two. */
1412        return (bit_width * insn.word(3) + 127) / 128;
1413    }
1414    default:
1415        /* everything else is just 1. */
1416        return 1;
1417
1418        /* Note: 64-bit vector widths are handled by the OpTypeVector case above;
1419         * scalars of any width consume only a single location. */
1420    }
1421}
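/* Worked example: a dvec3 (OpTypeVector, 3 components of float64) costs
 * (64 * 3 + 127) / 128 = 2 locations, while a vec4 of float32 costs
 * (32 * 4 + 127) / 128 = 1 -- mirroring the two-location formats enumerated in
 * get_locations_consumed_by_format() below. */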
1422
1423static unsigned get_locations_consumed_by_format(VkFormat format) {
1424    switch (format) {
1425    case VK_FORMAT_R64G64B64A64_SFLOAT:
1426    case VK_FORMAT_R64G64B64A64_SINT:
1427    case VK_FORMAT_R64G64B64A64_UINT:
1428    case VK_FORMAT_R64G64B64_SFLOAT:
1429    case VK_FORMAT_R64G64B64_SINT:
1430    case VK_FORMAT_R64G64B64_UINT:
1431        return 2;
1432    default:
1433        return 1;
1434    }
1435}
1436
1437typedef std::pair<unsigned, unsigned> location_t;
1438typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1439
1440struct interface_var {
1441    uint32_t id;
1442    uint32_t type_id;
1443    uint32_t offset;
1444    bool is_patch;
1445    bool is_block_member;
1446    /* TODO: collect the name, too? Isn't required to be present. */
1447};
1448
1449struct shader_stage_attributes {
1450    char const *const name;
1451    bool arrayed_input;
1452    bool arrayed_output;
1453};
1454
1455static shader_stage_attributes shader_stage_attribs[] = {
1456    {"vertex shader", false, false},
1457    {"tessellation control shader", true, true},
1458    {"tessellation evaluation shader", true, false},
1459    {"geometry shader", true, false},
1460    {"fragment shader", false, false},
1461};
1462
1463static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1464    while (true) {
1465
1466        if (def.opcode() == spv::OpTypePointer) {
1467            def = src->get_def(def.word(3));
1468        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1469            def = src->get_def(def.word(2));
1470            is_array_of_verts = false;
1471        } else if (def.opcode() == spv::OpTypeStruct) {
1472            return def;
1473        } else {
1474            return src->end();
1475        }
1476    }
1477}
1478
1479static void collect_interface_block_members(shader_module const *src,
1480                                            std::map<location_t, interface_var> *out,
1481                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1482                                            uint32_t id, uint32_t type_id, bool is_patch) {
1483    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1484    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1485    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1486        /* this isn't an interface block. */
1487        return;
1488    }
1489
1490    std::unordered_map<unsigned, unsigned> member_components;
1491
1492    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1493    for (auto insn : *src) {
1494        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1495            unsigned member_index = insn.word(2);
1496
1497            if (insn.word(3) == spv::DecorationComponent) {
1498                unsigned component = insn.word(4);
1499                member_components[member_index] = component;
1500            }
1501        }
1502    }
1503
1504    /* Second pass -- produce the output, from Location decorations */
1505    for (auto insn : *src) {
1506        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1507            unsigned member_index = insn.word(2);
1508            unsigned member_type_id = type.word(2 + member_index);
1509
1510            if (insn.word(3) == spv::DecorationLocation) {
1511                unsigned location = insn.word(4);
1512                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1513                auto component_it = member_components.find(member_index);
1514                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1515
1516                for (unsigned int offset = 0; offset < num_locations; offset++) {
1517                    interface_var v;
1518                    v.id = id;
1519                    /* TODO: member index in interface_var too? */
1520                    v.type_id = member_type_id;
1521                    v.offset = offset;
1522                    v.is_patch = is_patch;
1523                    v.is_block_member = true;
1524                    (*out)[std::make_pair(location + offset, component)] = v;
1525                }
1526            }
1527        }
1528    }
1529}
1530
1531static std::map<location_t, interface_var> collect_interface_by_location(
1532        shader_module const *src, spirv_inst_iter entrypoint,
1533        spv::StorageClass sinterface, bool is_array_of_verts) {
1534
1535    std::unordered_map<unsigned, unsigned> var_locations;
1536    std::unordered_map<unsigned, unsigned> var_builtins;
1537    std::unordered_map<unsigned, unsigned> var_components;
1538    std::unordered_map<unsigned, unsigned> blocks;
1539    std::unordered_map<unsigned, unsigned> var_patch;
1540
1541    for (auto insn : *src) {
1542
1543        /* We consider two interface models: SSO rendezvous-by-location, and
1544         * builtins. Complain about anything that fits neither model.
1545         */
1546        if (insn.opcode() == spv::OpDecorate) {
1547            if (insn.word(2) == spv::DecorationLocation) {
1548                var_locations[insn.word(1)] = insn.word(3);
1549            }
1550
1551            if (insn.word(2) == spv::DecorationBuiltIn) {
1552                var_builtins[insn.word(1)] = insn.word(3);
1553            }
1554
1555            if (insn.word(2) == spv::DecorationComponent) {
1556                var_components[insn.word(1)] = insn.word(3);
1557            }
1558
1559            if (insn.word(2) == spv::DecorationBlock) {
1560                blocks[insn.word(1)] = 1;
1561            }
1562
1563            if (insn.word(2) == spv::DecorationPatch) {
1564                var_patch[insn.word(1)] = 1;
1565            }
1566        }
1567    }
1568
1569    /* TODO: handle grouped decorations */
1570    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1571     * have the same location, and we DON'T want to clobber. */
1572
1573    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1574       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1575       the word to determine which word contains the terminator. */
1576    uint32_t word = 3;
1577    while (entrypoint.word(word) & 0xff000000u) {
1578        ++word;
1579    }
1580    ++word;
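    /* Example: the name "main" packs 'm','a','i','n' into word 3, and word 4 is
       all zero (terminator plus padding), so the loop exits with word == 4 and
       the increment above leaves the first interface id at word 5. */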
1581
1582    std::map<location_t, interface_var> out;
1583
1584    for (; word < entrypoint.len(); word++) {
1585        auto insn = src->get_def(entrypoint.word(word));
1586        assert(insn != src->end());
1587        assert(insn.opcode() == spv::OpVariable);
1588
1589        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1590            unsigned id = insn.word(2);
1591            unsigned type = insn.word(1);
1592
1593            int location = value_or_default(var_locations, id, -1);
1594            int builtin = value_or_default(var_builtins, id, -1);
1595            unsigned component = value_or_default(var_components, id, 0); /* unspecified is OK; defaults to 0 */
1596            bool is_patch = var_patch.find(id) != var_patch.end();
1597
1598            /* All variables and interface block members in the Input or Output storage classes
1599             * must be decorated with either a builtin or an explicit location.
1600             *
1601             * TODO: integrate the interface block support here. For now, don't complain --
1602             * a valid SPIRV module will only hit this path for the interface block case, as the
1603             * individual members of the type are decorated, rather than variable declarations.
1604             */
1605
1606            if (location != -1) {
1607                /* A user-defined interface variable, with a location. Where a variable
1608                 * occupied multiple locations, emit one result for each. */
1609                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1610                for (unsigned int offset = 0; offset < num_locations; offset++) {
1611                    interface_var v;
1612                    v.id = id;
1613                    v.type_id = type;
1614                    v.offset = offset;
1615                    v.is_patch = is_patch;
1616                    v.is_block_member = false;
1617                    out[std::make_pair(location + offset, component)] = v;
1618                }
1619            } else if (builtin == -1) {
1620                /* An interface block instance */
1621                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1622            }
1623        }
1624    }
1625
1626    return out;
1627}
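/* Example: a vertex shader declaring
 *     OpDecorate %pos Location 0
 *     OpDecorate %uv  Location 1
 * yields output entries keyed {0,0} and {1,0}; a dvec4 at location 0 would
 * instead emit two entries, at locations 0 and 1 with offsets 0 and 1, since
 * it consumes two locations. */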
1628
1629static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1630        debug_report_data *report_data, shader_module const *src,
1631        std::unordered_set<uint32_t> const &accessible_ids) {
1632
1633    std::vector<std::pair<uint32_t, interface_var>> out;
1634
1635    for (auto insn : *src) {
1636        if (insn.opcode() == spv::OpDecorate) {
1637            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1638                auto attachment_index = insn.word(3);
1639                auto id = insn.word(1);
1640
1641                if (accessible_ids.count(id)) {
1642                    auto def = src->get_def(id);
1643                    assert(def != src->end());
1644
1645                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1646                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1647                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1648                            interface_var v;
1649                            v.id = id;
1650                            v.type_id = def.word(1);
1651                            v.offset = offset;
1652                            v.is_patch = false;
1653                            v.is_block_member = false;
1654                            out.emplace_back(attachment_index + offset, v);
1655                        }
1656                    }
1657                }
1658            }
1659        }
1660    }
1661
1662    return out;
1663}
1664
1665static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1666        debug_report_data *report_data, shader_module const *src,
1667        std::unordered_set<uint32_t> const &accessible_ids) {
1668
1669    std::unordered_map<unsigned, unsigned> var_sets;
1670    std::unordered_map<unsigned, unsigned> var_bindings;
1671
1672    for (auto insn : *src) {
1673        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1674         * DecorationDescriptorSet and DecorationBinding.
1675         */
1676        if (insn.opcode() == spv::OpDecorate) {
1677            if (insn.word(2) == spv::DecorationDescriptorSet) {
1678                var_sets[insn.word(1)] = insn.word(3);
1679            }
1680
1681            if (insn.word(2) == spv::DecorationBinding) {
1682                var_bindings[insn.word(1)] = insn.word(3);
1683            }
1684        }
1685    }
1686
1687    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1688
1689    for (auto id : accessible_ids) {
1690        auto insn = src->get_def(id);
1691        assert(insn != src->end());
1692
1693        if (insn.opcode() == spv::OpVariable &&
1694            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1695            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1696            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1697
1698            interface_var v;
1699            v.id = insn.word(2);
1700            v.type_id = insn.word(1);
1701            v.offset = 0;
1702            v.is_patch = false;
1703            v.is_block_member = false;
1704            out.emplace_back(std::make_pair(set, binding), v);
1705        }
1706    }
1707
1708    return out;
1709}
1710
1711static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1712                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1713                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1714                                              shader_stage_attributes const *consumer_stage) {
1715    bool pass = true;
1716
1717    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1718    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1719
1720    auto a_it = outputs.begin();
1721    auto b_it = inputs.begin();
1722
1723    /* maps sorted by key (location); walk them together to find mismatches */
1724    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1725        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1726        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1727        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1728        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1729
1730        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1731            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1732                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1733                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1734                        a_first.second, consumer_stage->name)) {
1735                pass = false;
1736            }
1737            a_it++;
1738        } else if (a_at_end || a_first > b_first) {
1739            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1740                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1741                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1742                        producer_stage->name)) {
1743                pass = false;
1744            }
1745            b_it++;
1746        } else {
1747            // subtleties of arrayed interfaces:
1748            // - if is_patch, then the member is not arrayed, even though the interface may be.
1749            // - if is_block_member, then the extra array level of an arrayed interface is not
1750            //   expressed in the member type -- it's expressed in the block type.
1751            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1752                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1753                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1754                             true)) {
1755                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1756                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1757                            a_first.first, a_first.second,
1758                            describe_type(producer, a_it->second.type_id).c_str(),
1759                            describe_type(consumer, b_it->second.type_id).c_str())) {
1760                    pass = false;
1761                }
1762            }
1763            if (a_it->second.is_patch != b_it->second.is_patch) {
1764                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1765                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1766                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1767                            "per-%s in %s stage", a_first.first, a_first.second,
1768                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1769                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1770                    pass = false;
1771                }
1772            }
1773            a_it++;
1774            b_it++;
1775        }
1776    }
1777
1778    return pass;
1779}
1780
1781enum FORMAT_TYPE {
1782    FORMAT_TYPE_UNDEFINED,
1783    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1784    FORMAT_TYPE_SINT,
1785    FORMAT_TYPE_UINT,
1786};
1787
1788static unsigned get_format_type(VkFormat fmt) {
1789    switch (fmt) {
1790    case VK_FORMAT_UNDEFINED:
1791        return FORMAT_TYPE_UNDEFINED;
1792    case VK_FORMAT_R8_SINT:
1793    case VK_FORMAT_R8G8_SINT:
1794    case VK_FORMAT_R8G8B8_SINT:
1795    case VK_FORMAT_R8G8B8A8_SINT:
1796    case VK_FORMAT_R16_SINT:
1797    case VK_FORMAT_R16G16_SINT:
1798    case VK_FORMAT_R16G16B16_SINT:
1799    case VK_FORMAT_R16G16B16A16_SINT:
1800    case VK_FORMAT_R32_SINT:
1801    case VK_FORMAT_R32G32_SINT:
1802    case VK_FORMAT_R32G32B32_SINT:
1803    case VK_FORMAT_R32G32B32A32_SINT:
1804    case VK_FORMAT_R64_SINT:
1805    case VK_FORMAT_R64G64_SINT:
1806    case VK_FORMAT_R64G64B64_SINT:
1807    case VK_FORMAT_R64G64B64A64_SINT:
1808    case VK_FORMAT_B8G8R8_SINT:
1809    case VK_FORMAT_B8G8R8A8_SINT:
1810    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1811    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1812    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1813        return FORMAT_TYPE_SINT;
1814    case VK_FORMAT_R8_UINT:
1815    case VK_FORMAT_R8G8_UINT:
1816    case VK_FORMAT_R8G8B8_UINT:
1817    case VK_FORMAT_R8G8B8A8_UINT:
1818    case VK_FORMAT_R16_UINT:
1819    case VK_FORMAT_R16G16_UINT:
1820    case VK_FORMAT_R16G16B16_UINT:
1821    case VK_FORMAT_R16G16B16A16_UINT:
1822    case VK_FORMAT_R32_UINT:
1823    case VK_FORMAT_R32G32_UINT:
1824    case VK_FORMAT_R32G32B32_UINT:
1825    case VK_FORMAT_R32G32B32A32_UINT:
1826    case VK_FORMAT_R64_UINT:
1827    case VK_FORMAT_R64G64_UINT:
1828    case VK_FORMAT_R64G64B64_UINT:
1829    case VK_FORMAT_R64G64B64A64_UINT:
1830    case VK_FORMAT_B8G8R8_UINT:
1831    case VK_FORMAT_B8G8R8A8_UINT:
1832    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1833    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1834    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1835        return FORMAT_TYPE_UINT;
1836    default:
1837        return FORMAT_TYPE_FLOAT;
1838    }
1839}
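/* Example: VK_FORMAT_R8G8B8A8_UNORM falls through to FORMAT_TYPE_FLOAT and so
 * pairs with a float-fundamental shader type such as vec4, whereas
 * VK_FORMAT_R8G8B8A8_UINT classifies as FORMAT_TYPE_UINT and requires a uvec4;
 * validate_vi_against_vs_inputs() below compares exactly these classes. */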
1840
1841/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1842 * for comparison to a VkFormat's characterization above. */
1843static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1844    auto insn = src->get_def(type);
1845    assert(insn != src->end());
1846
1847    switch (insn.opcode()) {
1848    case spv::OpTypeInt:
1849        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1850    case spv::OpTypeFloat:
1851        return FORMAT_TYPE_FLOAT;
1852    case spv::OpTypeVector:
1853        return get_fundamental_type(src, insn.word(2));
1854    case spv::OpTypeMatrix:
1855        return get_fundamental_type(src, insn.word(2));
1856    case spv::OpTypeArray:
1857        return get_fundamental_type(src, insn.word(2));
1858    case spv::OpTypePointer:
1859        return get_fundamental_type(src, insn.word(3));
1860    case spv::OpTypeImage:
1861        return get_fundamental_type(src, insn.word(2));
1862
1863    default:
1864        return FORMAT_TYPE_UNDEFINED;
1865    }
1866}
1867
1868static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1869    uint32_t bit_pos = u_ffs(stage);
1870    return bit_pos - 1;
1871}
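/* Example: VK_SHADER_STAGE_FRAGMENT_BIT == 0x10, so u_ffs() (1-based
 * find-first-set) returns 5 and the stage id is 4 -- the "fragment shader"
 * slot in shader_stage_attribs above. */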
1872
1873static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1874    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1875     * each binding should be specified only once.
1876     */
1877    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1878    bool pass = true;
1879
1880    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1881        auto desc = &vi->pVertexBindingDescriptions[i];
1882        auto &binding = bindings[desc->binding];
1883        if (binding) {
1884            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1885                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1886                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1887                pass = false;
1888            }
1889        } else {
1890            binding = desc;
1891        }
1892    }
1893
1894    return pass;
1895}
1896
1897static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1898                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1899    bool pass = true;
1900
1901    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1902
1903    /* Build index by location */
1904    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1905    if (vi) {
1906        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1907            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1908            for (auto j = 0u; j < num_locations; j++) {
1909                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1910            }
1911        }
1912    }
1913
1914    auto it_a = attribs.begin();
1915    auto it_b = inputs.begin();
1916    bool used = false;
1917
1918    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1919        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1920        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1921        auto a_first = a_at_end ? 0 : it_a->first;
1922        auto b_first = b_at_end ? 0 : it_b->first.first;
1923        if (!a_at_end && (b_at_end || a_first < b_first)) {
1924            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1925                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1926                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1927                pass = false;
1928            }
1929            used = false;
1930            it_a++;
1931        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1932            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1933                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "VS consumes input at location %d which is not provided",
1934                        b_first)) {
1935                pass = false;
1936            }
1937            it_b++;
1938        } else {
1939            unsigned attrib_type = get_format_type(it_a->second->format);
1940            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1941
1942            /* type checking */
1943            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1944                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1945                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1946                            "Attribute type of `%s` at location %d does not match VS input type of `%s`",
1947                            string_VkFormat(it_a->second->format), a_first,
1948                            describe_type(vs, it_b->second.type_id).c_str())) {
1949                    pass = false;
1950                }
1951            }
1952
1953            /* OK! */
1954            used = true;
1955            it_b++;
1956        }
1957    }
1958
1959    return pass;
1960}
1961
1962static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
1963                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
1964                                                    uint32_t subpass_index) {
1965    std::map<uint32_t, VkFormat> color_attachments;
1966    auto subpass = rpci->pSubpasses[subpass_index];
1967    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
1968        uint32_t attachment = subpass.pColorAttachments[i].attachment;
1969        if (attachment == VK_ATTACHMENT_UNUSED)
1970            continue;
1971        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
1972            color_attachments[i] = rpci->pAttachments[attachment].format;
1973        }
1974    }
1975
1976    bool pass = true;
1977
1978    /* TODO: dual source blend index (spv::DecIndex, zero if not provided) */
1979
1980    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
1981
1982    auto it_a = outputs.begin();
1983    auto it_b = color_attachments.begin();
1984
1985    /* Walk attachment list and outputs together */
1986
1987    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
1988        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
1989        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
1990
1991        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
1992            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1993                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1994                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
1995                pass = false;
1996            }
1997            it_a++;
1998        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
1999            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2000                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
2001                pass = false;
2002            }
2003            it_b++;
2004        } else {
2005            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
2006            unsigned att_type = get_format_type(it_b->second);
2007
2008            /* type checking */
2009            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
2010                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2011                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
2012                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
2013                            string_VkFormat(it_b->second),
2014                            describe_type(fs, it_a->second.type_id).c_str())) {
2015                    pass = false;
2016                }
2017            }
2018
2019            /* OK! */
2020            it_a++;
2021            it_b++;
2022        }
2023    }
2024
2025    return pass;
2026}
2027
2028/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
2029 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
2030 * for example.
2031 * Note: we only explore the parts of the module which might actually contain ids we care about for the above analyses.
2032 *  - NOT the shader input/output interfaces.
2033 *
2034 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
2035 * converting parts of this to be generated from the machine-readable spec instead.
2036 */
2037static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
2038    std::unordered_set<uint32_t> ids;
2039    std::unordered_set<uint32_t> worklist;
2040    worklist.insert(entrypoint.word(2));
2041
2042    while (!worklist.empty()) {
2043        auto id_iter = worklist.begin();
2044        auto id = *id_iter;
2045        worklist.erase(id_iter);
2046
2047        auto insn = src->get_def(id);
2048        if (insn == src->end()) {
2049            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
2050             * across all kinds of things here that we may not care about. */
2051            continue;
2052        }
2053
2054        /* try to add to the output set */
2055        if (!ids.insert(id).second) {
2056            continue; /* if we already saw this id, we don't want to walk it again. */
2057        }
2058
2059        switch (insn.opcode()) {
2060        case spv::OpFunction:
2061            /* scan whole body of the function, enlisting anything interesting */
2062            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
2063                switch (insn.opcode()) {
2064                case spv::OpLoad:
2065                case spv::OpAtomicLoad:
2066                case spv::OpAtomicExchange:
2067                case spv::OpAtomicCompareExchange:
2068                case spv::OpAtomicCompareExchangeWeak:
2069                case spv::OpAtomicIIncrement:
2070                case spv::OpAtomicIDecrement:
2071                case spv::OpAtomicIAdd:
2072                case spv::OpAtomicISub:
2073                case spv::OpAtomicSMin:
2074                case spv::OpAtomicUMin:
2075                case spv::OpAtomicSMax:
2076                case spv::OpAtomicUMax:
2077                case spv::OpAtomicAnd:
2078                case spv::OpAtomicOr:
2079                case spv::OpAtomicXor:
2080                    worklist.insert(insn.word(3)); /* ptr */
2081                    break;
2082                case spv::OpStore:
2083                case spv::OpAtomicStore:
2084                    worklist.insert(insn.word(1)); /* ptr */
2085                    break;
2086                case spv::OpAccessChain:
2087                case spv::OpInBoundsAccessChain:
2088                    worklist.insert(insn.word(3)); /* base ptr */
2089                    break;
2090                case spv::OpSampledImage:
2091                case spv::OpImageSampleImplicitLod:
2092                case spv::OpImageSampleExplicitLod:
2093                case spv::OpImageSampleDrefImplicitLod:
2094                case spv::OpImageSampleDrefExplicitLod:
2095                case spv::OpImageSampleProjImplicitLod:
2096                case spv::OpImageSampleProjExplicitLod:
2097                case spv::OpImageSampleProjDrefImplicitLod:
2098                case spv::OpImageSampleProjDrefExplicitLod:
2099                case spv::OpImageFetch:
2100                case spv::OpImageGather:
2101                case spv::OpImageDrefGather:
2102                case spv::OpImageRead:
2103                case spv::OpImage:
2104                case spv::OpImageQueryFormat:
2105                case spv::OpImageQueryOrder:
2106                case spv::OpImageQuerySizeLod:
2107                case spv::OpImageQuerySize:
2108                case spv::OpImageQueryLod:
2109                case spv::OpImageQueryLevels:
2110                case spv::OpImageQuerySamples:
2111                case spv::OpImageSparseSampleImplicitLod:
2112                case spv::OpImageSparseSampleExplicitLod:
2113                case spv::OpImageSparseSampleDrefImplicitLod:
2114                case spv::OpImageSparseSampleDrefExplicitLod:
2115                case spv::OpImageSparseSampleProjImplicitLod:
2116                case spv::OpImageSparseSampleProjExplicitLod:
2117                case spv::OpImageSparseSampleProjDrefImplicitLod:
2118                case spv::OpImageSparseSampleProjDrefExplicitLod:
2119                case spv::OpImageSparseFetch:
2120                case spv::OpImageSparseGather:
2121                case spv::OpImageSparseDrefGather:
2122                case spv::OpImageTexelPointer:
2123                    worklist.insert(insn.word(3)); /* image or sampled image */
2124                    break;
2125                case spv::OpImageWrite:
2126                    worklist.insert(insn.word(1)); /* image -- operand order differs from the cases above */
2127                    break;
2128                case spv::OpFunctionCall:
2129                    for (uint32_t i = 3; i < insn.len(); i++) {
2130                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2131                    }
2132                    break;
2133
2134                case spv::OpExtInst:
2135                    for (uint32_t i = 5; i < insn.len(); i++) {
2136                        worklist.insert(insn.word(i)); /* operands to ext inst */
2137                    }
2138                    break;
2139                }
2140            }
2141            break;
2142        }
2143    }
2144
2145    return ids;
2146}
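/* Usage sketch (names are illustrative): the returned set feeds the resource
 * checks in this file, e.g.
 *
 *     auto ids = mark_accessible_ids(module, entrypoint);
 *     auto slots = collect_interface_by_descriptor_slot(report_data, module, ids);
 *
 * A descriptor the entrypoint never reaches stays out of the set and is simply
 * ignored rather than flagged. */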
2147
2148static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2149                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
2150                                                          shader_module const *src, spirv_inst_iter type,
2151                                                          VkShaderStageFlagBits stage) {
2152    bool pass = true;
2153
2154    /* strip off ptrs etc */
2155    type = get_struct_type(src, type, false);
2156    assert(type != src->end());
2157
2158    /* validate directly off the offsets. this isn't quite correct for arrays
2159     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2160     * sizes */
2161    for (auto insn : *src) {
2162        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2163
2164            if (insn.word(3) == spv::DecorationOffset) {
2165                unsigned offset = insn.word(4);
2166                auto size = 4; /* bytes; TODO: calculate this based on the type */
2167
2168                bool found_range = false;
2169                for (auto const &range : *push_constant_ranges) {
2170                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2171                        found_range = true;
2172
2173                        if ((range.stageFlags & stage) == 0) {
2174                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2175                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2176                                        "Push constant range covering variable starting at "
2177                                        "offset %u not accessible from stage %s",
2178                                        offset, string_VkShaderStageFlagBits(stage))) {
2179                                pass = false;
2180                            }
2181                        }
2182
2183                        break;
2184                    }
2185                }
2186
2187                if (!found_range) {
2188                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2189                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2190                                "Push constant range covering variable starting at "
2191                                "offset %u not declared in layout",
2192                                offset)) {
2193                        pass = false;
2194                    }
2195                }
2196            }
2197        }
2198    }
2199
2200    return pass;
2201}
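/* Example: a layout declaring a single VkPushConstantRange of
 * { VK_SHADER_STAGE_VERTEX_BIT, offset 0, size 64 } covers a member decorated
 * Offset 16 (0 <= 16 and 16 + 4 <= 64), but if this module is a fragment
 * shader the stageFlags test fails and the "not accessible from stage" error
 * above fires. */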
2202
2203static bool validate_push_constant_usage(debug_report_data *report_data,
2204                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2205                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2206    bool pass = true;
2207
2208    for (auto id : accessible_ids) {
2209        auto def_insn = src->get_def(id);
2210        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2211            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2212                                                                  src->get_def(def_insn.word(1)), stage);
2213        }
2214    }
2215
2216    return pass;
2217}
2218
2219// For given pipelineLayout verify that the set_layout_node at slot.first
2220//  has the requested binding at slot.second and return ptr to that binding
2221static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2222
2223    if (!pipelineLayout)
2224        return nullptr;
2225
2226    if (slot.first >= pipelineLayout->set_layouts.size())
2227        return nullptr;
2228
2229    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2230}
2231
2232// Block of code at start here for managing/tracking Pipeline state that this layer cares about
2233
2234static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2235
2236// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2237//   Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
2238//   to that same cmd buffer by a separate thread are not changing state from underneath us
2239// Track the last cmd buffer touched by this thread
2240
2241static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2242    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2243        if (pCB->drawCount[i])
2244            return true;
2245    }
2246    return false;
2247}
2248
2249// Check object status for selected flag state
2250static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2251                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2252    if (!(pNode->status & status_mask)) {
2253        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2254                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2255                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2256    }
2257    return false;
2258}
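// Example: validate_draw_state_flags() below checks CBSTATUS_LINE_WIDTH_SET for
// line-topology pipelines; if the command buffer never gained that status bit,
// the log_msg() above fires with DRAWSTATE_LINE_WIDTH_NOT_BOUND and the CB handle.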
2259
2260// Retrieve pipeline node ptr for given pipeline object
2261static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2262    auto it = my_data->pipelineMap.find(pipeline);
2263    if (it == my_data->pipelineMap.end()) {
2264        return nullptr;
2265    }
2266    return it->second;
2267}
2268
2269static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2270    auto it = my_data->renderPassMap.find(renderpass);
2271    if (it == my_data->renderPassMap.end()) {
2272        return nullptr;
2273    }
2274    return it->second;
2275}
2276
2277static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
2278    auto it = my_data->frameBufferMap.find(framebuffer);
2279    if (it == my_data->frameBufferMap.end()) {
2280        return nullptr;
2281    }
2282    return it->second.get();
2283}
2284
2285cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2286    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2287    if (it == my_data->descriptorSetLayoutMap.end()) {
2288        return nullptr;
2289    }
2290    return it->second;
2291}
2292
2293static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2294    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2295    if (it == my_data->pipelineLayoutMap.end()) {
2296        return nullptr;
2297    }
2298    return &it->second;
2299}
2300
2301// Return true if for a given PSO, the given state enum is dynamic, else return false
2302static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2303    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2304        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2305            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2306                return true;
2307        }
2308    }
2309    return false;
2310}
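// Example: a pipeline created with VK_DYNAMIC_STATE_LINE_WIDTH listed in
// pDynamicStates makes isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH) return
// true; draw-time validation uses this to decide whether state must come from a
// vkCmdSet* call rather than from the pipeline itself.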
2311
2312// Validate state stored as flags at time of draw call
2313static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2314    bool result = false;
2315    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2316        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2317         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2318        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2319                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2320    }
2321    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2322        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2323        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2324                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2325    }
2326    if (pPipe->blendConstantsEnabled) {
2327        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2328                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2329    }
2330    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2331        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2332        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2333                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2334    }
2335    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2336        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2337        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2338                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2339        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2340                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2341        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2342                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2343    }
2344    if (indexedDraw) {
2345        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2346                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2347                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2348    }
2349    return result;
2350}
2351
2352// Verify attachment reference compatibility according to spec
2353//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
2354//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
2355//   to make sure that format and samples counts match.
2356//  If not, they are not compatible.
2357static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2358                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2359                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2360                                             const VkAttachmentDescription *pSecondaryAttachments) {
2361    // Check potential NULL cases first to avoid nullptr issues later
2362    if (pPrimary == nullptr) {
2363        if (pSecondary == nullptr) {
2364            return true;
2365        }
2366        return false;
2367    } else if (pSecondary == nullptr) {
2368        return false;
2369    }
2370    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2371        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2372            return true;
2373    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2374        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2375            return true;
2376    } else { // Format and sample count must match
2377        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2378            return true;
2379        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2380            return false;
2381        }
2382        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2383             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2384            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2385             pSecondaryAttachments[pSecondary[index].attachment].samples))
2386            return true;
2387    }
2388    // References are not compatible: unused/used mismatch above, or format/sample count mismatch
2389    return false;
2390}
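
// Illustrative sketch of what the check above accepts: only the referenced descriptions'
// format and sample count are compared, so references that differ only in layout remain
// compatible. Hypothetical descriptions with matching format/samples on both sides assumed:
//
//     VkAttachmentReference primary_ref   = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkAttachmentReference secondary_ref = {0, VK_IMAGE_LAYOUT_GENERAL};
//     // attachment_references_compatible(0, &primary_ref, 1, primary_descs,
//     //                                  &secondary_ref, 1, secondary_descs) == true
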
2391// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
2392// For the given primary and secondary RenderPassCreateInfos, verify that they are compatible
2393static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
2394                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
2395    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
2396        stringstream errorStr;
2397        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
2398                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
2399        errorMsg = errorStr.str();
2400        return false;
2401    }
2402    for (uint32_t spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
2404        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
2405        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2406        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
2407        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
2408        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
2409            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
2410                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
2411                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
2412                stringstream errorStr;
2413                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2414                errorMsg = errorStr.str();
2415                return false;
2416            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2417                                                         primaryColorCount, primaryRPCI->pAttachments,
2418                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
2419                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
2420                stringstream errorStr;
2421                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
2422                errorMsg = errorStr.str();
2423                return false;
2424            }
2425        }
2426
2427        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2428                                              1, primaryRPCI->pAttachments,
2429                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
2430                                              1, secondaryRPCI->pAttachments)) {
2431            stringstream errorStr;
2432            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
2433            errorMsg = errorStr.str();
2434            return false;
2435        }
2436
2437        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2438        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
2439        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
2440        for (uint32_t i = 0; i < inputMax; ++i) {
2441            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
2442                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
2443                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
2444                stringstream errorStr;
2445                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
2446                errorMsg = errorStr.str();
2447                return false;
2448            }
2449        }
2450    }
2451    return true;
2452}
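
// Usage sketch with hypothetical arguments: the subpassCount comparison above is the
// first thing to fail when, e.g., a secondary command buffer recorded against a
// two-subpass render pass executes inside a one-subpass render pass instance:
//
//     std::string err;
//     if (!verify_renderpass_compatibility(dev_data, one_subpass_rp_ci, two_subpass_rp_ci, err)) {
//         // err == "RenderPass for primary cmdBuffer has 1 subpasses but renderPass
//         //         for secondary cmdBuffer has 2 subpasses."
//     }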
2453
2454// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
2455// pipelineLayout[layoutIndex]
2456static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
2457                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
2458                                            string &errorMsg) {
2459    auto num_sets = pipeline_layout->set_layouts.size();
2460    if (layoutIndex >= num_sets) {
2461        stringstream errorStr;
2462        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
2463                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
2464                 << layoutIndex;
2465        errorMsg = errorStr.str();
2466        return false;
2467    }
2468    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
2469    return pSet->IsCompatible(layout_node, &errorMsg);
2470}
2471
2472// Validate that data for each specialization entry is fully contained within the buffer.
2473static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
2474    bool pass = true;
2475
2476    VkSpecializationInfo const *spec = info->pSpecializationInfo;
2477
2478    if (spec) {
2479        for (auto i = 0u; i < spec->mapEntryCount; i++) {
2480            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
2481                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2482                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
2483                            "Specialization entry %u (for constant id %u) references memory outside provided "
2484                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
2485                            " bytes provided)",
2486                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
2487                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
2488
2489                    pass = false;
2490                }
2491            }
2492        }
2493    }
2494
2495    return pass;
2496}
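
// Worked example of data the check above rejects (illustrative values only):
//
//     int32_t value = 0;
//     VkSpecializationMapEntry entry = {7 /*constantID*/, 2 /*offset*/, 4 /*size*/};
//     VkSpecializationInfo spec = {1, &entry, sizeof(value) /*dataSize == 4*/, &value};
//     // offset(2) + size(4) == 6 > dataSize(4), so entry 0 (constant id 7) is flagged.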
2497
2498static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
2499                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
2500    auto type = module->get_def(type_id);
2501
2502    descriptor_count = 1;
2503
2504    /* Strip off any array or ptrs. Where we remove array levels, adjust the
2505     * descriptor count for each dimension. */
2506    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
2507        if (type.opcode() == spv::OpTypeArray) {
2508            descriptor_count *= get_constant_value(module, type.word(3));
2509            type = module->get_def(type.word(2));
2510        }
2511        else {
2512            type = module->get_def(type.word(3));
2513        }
2514    }
2515
2516    switch (type.opcode()) {
2517    case spv::OpTypeStruct: {
2518        for (auto insn : *module) {
2519            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
2520                if (insn.word(2) == spv::DecorationBlock) {
2521                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2522                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
2523                } else if (insn.word(2) == spv::DecorationBufferBlock) {
2524                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2525                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
2526                }
2527            }
2528        }
2529
2530        /* Invalid */
2531        return false;
2532    }
2533
2534    case spv::OpTypeSampler:
2535        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
2536            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2537
2538    case spv::OpTypeSampledImage:
2539        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
2540            /* Slight relaxation for some GLSL historical madness: samplerBuffer
2541             * doesn't really have a sampler, and a texel buffer descriptor
2542             * doesn't really provide one. Allow this slight mismatch.
2543             */
2544            auto image_type = module->get_def(type.word(2));
2545            auto dim = image_type.word(3);
2546            auto sampled = image_type.word(7);
2547            return dim == spv::DimBuffer && sampled == 1;
2548        }
2549        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2550
2551    case spv::OpTypeImage: {
2552        /* Many descriptor types can back an image type; which one depends on the
2553         * dimension and on whether the image will be used with a sampler. SPIR-V
2554         * for Vulkan requires that sampled be 1 or 2; leaving the decision to
2555         * runtime is unacceptable.
2556         */
2557        auto dim = type.word(3);
2558        auto sampled = type.word(7);
2559
2560        if (dim == spv::DimSubpassData) {
2561            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
2562        } else if (dim == spv::DimBuffer) {
2563            if (sampled == 1) {
2564                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
2565            } else {
2566                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
2567            }
2568        } else if (sampled == 1) {
2569            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
2570                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2571        } else {
2572            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
2573        }
2574    }
2575
2576    /* We shouldn't really see any other junk types -- but if we do, they're
2577     * a mismatch.
2578     */
2579    default:
2580        return false; /* Mismatch */
2581    }
2582}
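
// Illustrative GLSL -> SPIR-V -> VkDescriptorType pairs that the matcher above encodes:
//
//     layout(set=0, binding=0) uniform UBO  { ... };      // OpTypeStruct + Block       -> UNIFORM_BUFFER[_DYNAMIC]
//     layout(set=0, binding=1) buffer  SSBO { ... };      // OpTypeStruct + BufferBlock -> STORAGE_BUFFER[_DYNAMIC]
//     layout(set=0, binding=2) uniform sampler2D tex;     // OpTypeSampledImage         -> COMBINED_IMAGE_SAMPLER
//     layout(set=0, binding=3) uniform texture2D img;     // OpTypeImage, sampled=1     -> SAMPLED_IMAGE (or COMBINED)
//     layout(set=0, binding=4, r32f) uniform image2D st;  // OpTypeImage, sampled=2     -> STORAGE_IMAGE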
2583
2584static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
2585    if (!feature) {
2586        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2587                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
2588                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
2589                    "enabled on the device",
2590                    feature_name)) {
2591            return false;
2592        }
2593    }
2594
2595    return true;
2596}
2597
2598static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
2599                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
2600    bool pass = true;
2601
2602
2603    for (auto insn : *src) {
2604        if (insn.opcode() == spv::OpCapability) {
2605            switch (insn.word(1)) {
2606            case spv::CapabilityMatrix:
2607            case spv::CapabilityShader:
2608            case spv::CapabilityInputAttachment:
2609            case spv::CapabilitySampled1D:
2610            case spv::CapabilityImage1D:
2611            case spv::CapabilitySampledBuffer:
2612            case spv::CapabilityImageBuffer:
2613            case spv::CapabilityImageQuery:
2614            case spv::CapabilityDerivativeControl:
2615                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
2616                break;
2617
2618            case spv::CapabilityGeometry:
2619                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
2620                break;
2621
2622            case spv::CapabilityTessellation:
2623                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
2624                break;
2625
2626            case spv::CapabilityFloat64:
2627                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
2628                break;
2629
2630            case spv::CapabilityInt64:
2631                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
2632                break;
2633
2634            case spv::CapabilityTessellationPointSize:
2635            case spv::CapabilityGeometryPointSize:
2636                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
2637                                        "shaderTessellationAndGeometryPointSize");
2638                break;
2639
2640            case spv::CapabilityImageGatherExtended:
2641                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
2642                break;
2643
2644            case spv::CapabilityStorageImageMultisample:
2645                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2646                break;
2647
2648            case spv::CapabilityUniformBufferArrayDynamicIndexing:
2649                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
2650                                        "shaderUniformBufferArrayDynamicIndexing");
2651                break;
2652
2653            case spv::CapabilitySampledImageArrayDynamicIndexing:
2654                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
2655                                        "shaderSampledImageArrayDynamicIndexing");
2656                break;
2657
2658            case spv::CapabilityStorageBufferArrayDynamicIndexing:
2659                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
2660                                        "shaderStorageBufferArrayDynamicIndexing");
2661                break;
2662
2663            case spv::CapabilityStorageImageArrayDynamicIndexing:
2664                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
2665                                        "shaderStorageImageArrayDynamicIndexing");
2666                break;
2667
2668            case spv::CapabilityClipDistance:
2669                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
2670                break;
2671
2672            case spv::CapabilityCullDistance:
2673                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
2674                break;
2675
2676            case spv::CapabilityImageCubeArray:
2677                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2678                break;
2679
2680            case spv::CapabilitySampleRateShading:
2681                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2682                break;
2683
2684            case spv::CapabilitySparseResidency:
2685                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
2686                break;
2687
2688            case spv::CapabilityMinLod:
2689                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
2690                break;
2691
2692            case spv::CapabilitySampledCubeArray:
2693                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
2694                break;
2695
2696            case spv::CapabilityImageMSArray:
2697                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
2698                break;
2699
2700            case spv::CapabilityStorageImageExtendedFormats:
2701                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
2702                                        "shaderStorageImageExtendedFormats");
2703                break;
2704
2705            case spv::CapabilityInterpolationFunction:
2706                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
2707                break;
2708
2709            case spv::CapabilityStorageImageReadWithoutFormat:
2710                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
2711                                        "shaderStorageImageReadWithoutFormat");
2712                break;
2713
2714            case spv::CapabilityStorageImageWriteWithoutFormat:
2715                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
2716                                        "shaderStorageImageWriteWithoutFormat");
2717                break;
2718
2719            case spv::CapabilityMultiViewport:
2720                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
2721                break;
2722
2723            default:
2724                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2725                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
2726                            "Shader declares capability %u, not supported in Vulkan.",
2727                            insn.word(1)))
2728                    pass = false;
2729                break;
2730            }
2731        }
2732    }
2733
2734    return pass;
2735}
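
// Illustrative trigger for the capability check above: a module declaring
// "OpCapability Geometry" fails validation when the device was created with
// geometryShader left at VK_FALSE, as in this hypothetical creation sketch:
//
//     VkPhysicalDeviceFeatures features = {};   // all features disabled
//     VkDeviceCreateInfo device_ci = {};
//     device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     device_ci.pEnabledFeatures = &features;
//     // A geometry-stage module used on this device reports
//     // SHADER_CHECKER_FEATURE_NOT_ENABLED naming "geometryShader".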
2736
2737
2738static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
2739    auto type = module->get_def(type_id);
2740
2741    while (true) {
2742        switch (type.opcode()) {
2743        case spv::OpTypeArray:
2744        case spv::OpTypeSampledImage:
2745            type = module->get_def(type.word(2));
2746            break;
2747        case spv::OpTypePointer:
2748            type = module->get_def(type.word(3));
2749            break;
2750        case spv::OpTypeImage: {
2751            auto dim = type.word(3);
2752            auto arrayed = type.word(5);
2753            auto msaa = type.word(6);
2754
2755            switch (dim) {
2756            case spv::Dim1D:
2757                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
2758            case spv::Dim2D:
2759                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
2760                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
2761            case spv::Dim3D:
2762                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
2763            case spv::DimCube:
2764                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
2765            case spv::DimSubpassData:
2766                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
2767            default:  // buffer, etc.
2768                return 0;
2769            }
2770        }
2771        default:
2772            return 0;
2773        }
2774    }
2775}
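
// Worked examples for the mapping above (illustrative GLSL resource types):
//     sampler2D        -> OpTypeImage Dim2D, msaa=0, arrayed=0
//                         => DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D
//     sampler2DMSArray -> OpTypeImage Dim2D, msaa=1, arrayed=1
//                         => DESCRIPTOR_REQ_MULTI_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY
//     samplerCube      -> OpTypeImage DimCube, arrayed=0 => DESCRIPTOR_REQ_VIEW_TYPE_CUBE
//     samplerBuffer    -> OpTypeImage DimBuffer          => 0 (no view/sample requirements)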
2776
2777
2778static bool validate_pipeline_shader_stage(debug_report_data *report_data,
2779                                           VkPipelineShaderStageCreateInfo const *pStage,
2780                                           PIPELINE_NODE *pipeline,
2781                                           shader_module **out_module,
2782                                           spirv_inst_iter *out_entrypoint,
2783                                           VkPhysicalDeviceFeatures const *enabledFeatures,
2784                                           std::unordered_map<VkShaderModule,
2785                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
2786    bool pass = true;
2787    auto module_it = shaderModuleMap.find(pStage->module);
2788    auto module = *out_module = module_it->second.get();
2789
2790    /* find the entrypoint */
2791    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
2792    if (entrypoint == module->end()) {
2793        log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2794                __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
2795                "No entrypoint found named `%s` for stage %s", pStage->pName,
2796                string_VkShaderStageFlagBits(pStage->stage));
2797        // No point continuing beyond here; any further analysis would operate on an end() iterator.
2798        return false;
2799    }
2800
2801    /* validate shader capabilities against enabled device features */
2802    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);
2803
2804    /* mark accessible ids */
2805    auto accessible_ids = mark_accessible_ids(module, entrypoint);
2806
2807    /* validate descriptor set layout against what the entrypoint actually uses */
2808    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
2809
2810    auto pipelineLayout = pipeline->pipeline_layout;
2811
2812    pass &= validate_specialization_offsets(report_data, pStage);
2813    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);
2814
2815    /* validate descriptor use */
2816    for (auto use : descriptor_uses) {
2817        // While validating shaders capture which slots are used by the pipeline
2818        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
2819        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));
2820
2821        /* verify given pipelineLayout has requested setLayout with requested binding */
2822        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
2823        unsigned required_descriptor_count;
2824
2825        if (!binding) {
2826            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2827                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
2828                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
2829                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
2830                pass = false;
2831            }
2832        } else if (~binding->stageFlags & pStage->stage) {
2833            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
2834                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2835                        "Shader uses descriptor slot %u.%u (used "
2836                        "as type `%s`) but descriptor not "
2837                        "accessible from stage %s",
2838                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2839                        string_VkShaderStageFlagBits(pStage->stage))) {
2840                pass = false;
2841            }
2842        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
2843                                          /*out*/ required_descriptor_count)) {
2844            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2845                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
2846                                                                       "%u.%u (used as type `%s`) but bound "
2847                                                                       "descriptor is of type %s",
2848                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
2849                        string_VkDescriptorType(binding->descriptorType))) {
2850                pass = false;
2851            }
2852        } else if (binding->descriptorCount < required_descriptor_count) {
2853            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2854                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
2855                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
2856                        required_descriptor_count, use.first.first, use.first.second,
2857                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
2858                pass = false;
2859            }
2860        }
2861    }
2862
2863    /* validate use of input attachments against subpass structure */
2864    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
2865        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);
2866
2867        auto rpci = pipeline->render_pass_ci.ptr();
2868        auto subpass = pipeline->graphicsPipelineCI.subpass;
2869
2870        for (auto use : input_attachment_uses) {
2871            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
2872            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
2873                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;
2874
2875            if (index == VK_ATTACHMENT_UNUSED) {
2876                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2877                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
2878                            "Shader consumes input attachment index %d but not provided in subpass",
2879                            use.first)) {
2880                    pass = false;
2881                }
2882            }
2883            else if (get_format_type(rpci->pAttachments[index].format) !=
2884                    get_fundamental_type(module, use.second.type_id)) {
2885                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
2886                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
2887                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
2888                            use.first, string_VkFormat(rpci->pAttachments[index].format),
2889                            describe_type(module, use.second.type_id).c_str())) {
2890                    pass = false;
2891                }
2892            }
2893        }
2894    }
2895
2896    return pass;
2897}
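
// Illustrative mismatch the descriptor-use loop above reports: a fragment shader declaring
//
//     layout(set=0, binding=1) uniform sampler2D tex;
//
// against a pipeline layout whose set 0, binding 1 is VK_DESCRIPTOR_TYPE_STORAGE_BUFFER
// yields SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, while a binding declared without
// VK_SHADER_STAGE_FRAGMENT_BIT in its stageFlags yields
// SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE.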
2898
2899
2900// Validate the shaders used by the given pipeline and capture the descriptor
2901//  slots that are actually used by the pipeline into pPipeline->active_slots
2902static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
2903                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
2904                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2905    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
2906    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2907    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);
2908
2909    shader_module *shaders[5];
2910    memset(shaders, 0, sizeof(shaders));
2911    spirv_inst_iter entrypoints[5];
2912    memset(entrypoints, 0, sizeof(entrypoints));
2913    VkPipelineVertexInputStateCreateInfo const *vi = 0;
2914    bool pass = true;
2915
2916    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
2917        auto pStage = &pCreateInfo->pStages[i];
2918        auto stage_id = get_shader_stage_id(pStage->stage);
2919        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
2920                                               &shaders[stage_id], &entrypoints[stage_id],
2921                                               enabledFeatures, shaderModuleMap);
2922    }
2923
2924    // if the shader stages are no good individually, cross-stage validation is pointless.
2925    if (!pass)
2926        return false;
2927
2928    vi = pCreateInfo->pVertexInputState;
2929
2930    if (vi) {
2931        pass &= validate_vi_consistency(report_data, vi);
2932    }
2933
2934    if (shaders[vertex_stage]) {
2935        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
2936    }
2937
2938    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
2939    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
2940
2941    while (!shaders[producer] && producer != fragment_stage) {
2942        producer++;
2943        consumer++;
2944    }
2945
2946    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
2947        assert(shaders[producer]);
2948        if (shaders[consumer]) {
2949            pass &= validate_interface_between_stages(report_data,
2950                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
2951                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);
2952
2953            producer = consumer;
2954        }
2955    }
2956
2957    if (shaders[fragment_stage]) {
2958        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
2959                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
2960    }
2961
2962    return pass;
2963}
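
// Stage-chaining sketch for the producer/consumer walk above (illustrative): with only
// VS and FS present, the loop skips the empty tessellation and geometry slots and matches
// VS outputs directly against FS inputs:
//
//     shaders[5]: [ VS, null(TCS), null(TES), null(GS), FS ]
//     producer = VS; consumer advances TCS -> TES -> GS -> FS;
//     validate_interface_between_stages(VS, FS) is the only pairwise check run.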
2964
2965static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
2966                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
2967    auto pCreateInfo = pPipeline->computePipelineCI.ptr();
2968
2969    shader_module *module;
2970    spirv_inst_iter entrypoint;
2971
2972    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
2973                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
2974}
2975// Return Set node ptr for specified set or else NULL
2976cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
2977    auto set_it = my_data->setMap.find(set);
2978    if (set_it == my_data->setMap.end()) {
2979        return NULL;
2980    }
2981    return set_it->second;
2982}
2983// For the given command buffer, verify and update the state for activeSetBindingsPairs
2984//  This includes:
2985//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
2986//     To be valid, the dynamic offset combined with the offset and range from its
2987//     descriptor update must not overflow the size of its buffer being updated
2988//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
2989//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
2990static bool validate_and_update_drawtime_descriptor_state(
2991    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
2992    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
2993        &activeSetBindingsPairs,
2994    const char *function) {
2995    bool result = false;
2996    for (auto set_bindings_pair : activeSetBindingsPairs) {
2997        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
2998        std::string err_str;
2999        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
3000                                         &err_str)) {
3001            // Report error here
3002            auto set = set_node->GetSet();
3003            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3004                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3005                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
3006                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
3007        }
3008        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
3009    }
3010    return result;
3011}
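
// Worked example for the dynamic-offset rule above (illustrative numbers): a dynamic
// uniform buffer descriptor written with offset=256 and range=128 against a 512-byte
// buffer tolerates dynamic offsets up to 128, since 128 + 256 + 128 == 512; binding
// with a dynamic offset of 192 overflows (192 + 256 + 128 == 576 > 512) and is
// reported by ValidateDrawState at draw time.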
3012
3013// For given pipeline, return number of MSAA samples, or VK_SAMPLE_COUNT_1_BIT if no multisample state is present
3014static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
3015    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
3016        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
3017        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
3018    }
3019    return VK_SAMPLE_COUNT_1_BIT;
3020}
3021
3022static void list_bits(std::ostream& s, uint32_t bits) {
3023    for (int i = 0; i < 32 && bits; i++) {
3024        if (bits & (1 << i)) {
3025            s << i;
3026            bits &= ~(1 << i);
3027            if (bits) {
3028                s << ",";
3029            }
3030        }
3031    }
3032}
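
// Example (illustrative): bits 0, 1 and 3 set produce the string "0,1,3", which the
// viewport/scissor diagnostics below embed in their messages:
//
//     std::stringstream ss;
//     list_bits(ss, (1u << 0) | (1u << 1) | (1u << 3));   // ss.str() == "0,1,3"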
3033
3034// Validate draw-time state related to the PSO
3035static bool validatePipelineDrawtimeState(layer_data const *my_data,
3036                                          LAST_BOUND_STATE const &state,
3037                                          const GLOBAL_CB_NODE *pCB,
3038                                          PIPELINE_NODE const *pPipeline) {
3039    bool skip_call = false;
3040
3041    // Verify Vtx binding
3042    if (pPipeline->vertexBindingDescriptions.size() > 0) {
3043        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
3044            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
3045            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
3046                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
3047                skip_call |= log_msg(
3048                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3049                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3050                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
3051                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
3052                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
3053                    (uint64_t)state.pipeline_node->pipeline, vertex_binding, i, vertex_binding);
3054            }
3055        }
3056    } else {
3057        if (!pCB->currentDrawData.buffers.empty()) {
3058            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
3059                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
3060                                 "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
3061                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
3062                                 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline_node->pipeline);
3063        }
3064    }
3065    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
3066    // Skip check if rasterization is disabled or there is no viewport.
3067    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
3068         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
3069        pPipeline->graphicsPipelineCI.pViewportState) {
3070        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3071        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3072
3073        if (dynViewport) {
3074            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
3075            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
3076            if (missingViewportMask) {
3077                std::stringstream ss;
3078                ss << "Dynamic viewport(s) ";
3079                list_bits(ss, missingViewportMask);
3080                ss << " are used by PSO, but were not provided via calls to vkCmdSetViewport().";
3081                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3082                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3083                                     "%s", ss.str().c_str());
3084            }
3085        }
3086
3087        if (dynScissor) {
3088            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
3089            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
3090            if (missingScissorMask) {
3091                std::stringstream ss;
3092                ss << "Dynamic scissor(s) ";
3093                list_bits(ss, missingScissorMask);
3094                ss << " are used by PSO, but were not provided via calls to vkCmdSetScissor().";
3095                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3096                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3097                                     "%s", ss.str().c_str());
3098            }
3099        }
3100    }
3101
3102    // Verify that any MSAA request in PSO matches sample# in bound FB
3103    // Skip the check if rasterization is disabled.
3104    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3105        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3106        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
3107        if (pCB->activeRenderPass) {
3108            const VkRenderPassCreateInfo *render_pass_info = pCB->activeRenderPass->pCreateInfo;
3109            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
3110            uint32_t i;
3111
3112            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
3113            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
3114                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
3115                skip_call |=
3116                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3117                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
3118                                "Render pass subpass %u mismatch with blending state defined and blend state attachment "
3119                                "count %u while subpass color attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These "
3120                                "must be the same at draw-time.",
3121                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
3122                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3123            }
3124
3125            unsigned subpass_num_samples = 0;
3126
3127            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
3128                auto attachment = subpass_desc->pColorAttachments[i].attachment;
3129                if (attachment != VK_ATTACHMENT_UNUSED)
3130                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3131            }
3132
3133            if (subpass_desc->pDepthStencilAttachment &&
3134                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3135                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
3136                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
3137            }
3138
3139            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
3140                skip_call |=
3141                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3142                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3143                                "Num samples mismatch! At draw-time in Pipeline (0x%" PRIxLEAST64
3144                                ") with %u samples while current RenderPass (0x%" PRIxLEAST64 ") w/ %u samples!",
3145                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
3146                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
3147            }
3148        } else {
3149            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3150                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
3151                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
3152                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
3153        }
3154    }
3155    // Verify that PSO creation renderPass is compatible with active renderPass
3156    if (pCB->activeRenderPass) {
3157        std::string err_string;
3158        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
3159            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->pCreateInfo, pPipeline->render_pass_ci.ptr(),
3160                                             err_string)) {
3161            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
3162            skip_call |=
3163                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
3164                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
3165                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
3166                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
3167                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass), reinterpret_cast<uint64_t &>(pPipeline),
3168                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
3169        }
3170    }
3171    // TODO : Add more checks here
3172
3173    return skip_call;
3174}
3175
3176// Validate overall state at the time of a draw call
3177static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
3178                                           const VkPipelineBindPoint bindPoint, const char *function) {
3179    bool result = false;
3180    auto const &state = cb_node->lastBound[bindPoint];
3181    PIPELINE_NODE *pPipe = state.pipeline_node;
3182    if (nullptr == pPipe) {
3183        result |= log_msg(
3184            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
3185            DRAWSTATE_INVALID_PIPELINE, "DS",
3186            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
3187        // Early return as any further checks below will be busted w/o a pipeline
3188        if (result)
3189            return true;
3190    }
3191    // First check flag states
3192    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3193        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);
3194
3195    // Now complete other state checks
3196    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
3197        string errorString;
3198        auto pipeline_layout = pPipe->pipeline_layout;
3199
3200        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
3201        vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
3202            activeSetBindingsPairs;
3203        for (auto & setBindingPair : pPipe->active_slots) {
3204            uint32_t setIndex = setBindingPair.first;
3205            // If valid set is not bound throw an error
3206            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
3207                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3208                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
3209                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
3210                                  setIndex);
3211            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
3212                                                        errorString)) {
3213                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
3214                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
3215                result |=
3216                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3217                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
3218                            "VkDescriptorSet (0x%" PRIxLEAST64
3219                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
3220                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
3221                            errorString.c_str());
3222            } else { // Valid set is bound and layout compatible, validate that it's updated
3223                // Pull the set node
3224                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
3225                // Gather active bindings
3226                std::unordered_set<uint32_t> bindings;
3227                for (auto binding : setBindingPair.second) {
3228                    bindings.insert(binding.first);
3229                }
3230                // Bind this set and its active descriptor resources to the command buffer
3231                pSet->BindCommandBuffer(cb_node, bindings);
3232                // Save vector of all active sets to verify dynamicOffsets below
3233                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
3234                // Make sure set has been updated if it has no immutable samplers
3235                //  If it has immutable samplers, we'll flag error later as needed depending on binding
3236                if (!pSet->IsUpdated()) {
3237                    for (auto binding : bindings) {
3238                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
3239                            result |= log_msg(
3240                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3241                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
3242                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
3243                                "this will result in undefined behavior.",
3244                                (uint64_t)pSet->GetSet());
3245                        }
3246                    }
3247                }
3248            }
3249        }
3250        // For given active slots, verify any dynamic descriptors and record updated images & buffers
3251        result |= validate_and_update_drawtime_descriptor_state(my_data, cb_node, activeSetBindingsPairs, function);
3252    }
3253
3254    // Check general pipeline state that needs to be validated at drawtime
3255    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
3256        result |= validatePipelineDrawtimeState(my_data, state, cb_node, pPipe);
3257
3258    return result;
3259}
3260
3261// Validate HW line width capabilities prior to setting requested line width.
3262static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
3263    bool skip_call = false;
3264
3265    // First check to see if the physical device supports wide lines.
3266    if ((VK_FALSE == my_data->phys_dev_properties.features.wideLines) && (1.0f != lineWidth)) {
3267        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
3268                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
3269                                            "not supported/enabled so lineWidth must be 1.0f!",
3270                             lineWidth);
3271    } else {
3272        // Otherwise, make sure the width falls in the valid range.
3273        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
3274            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
3275            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
3276                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
3277                                                          "to between [%f, %f]!",
3278                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
3279                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
3280        }
3281    }
3282
3283    return skip_call;
3284}
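
// Illustrative triggers for the width checks above, assuming a device where wideLines
// is enabled and limits.lineWidthRange is [1.0, 8.0]:
//
//     vkCmdSetLineWidth(cmd_buffer, 4.0f);    // in range: no error
//     vkCmdSetLineWidth(cmd_buffer, 16.0f);   // outside [1.0, 8.0]: reported
//     // With wideLines disabled, any width other than 1.0f is reported.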
3285
3286// Verify that create state for a pipeline is valid
3287static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3288                                      int pipelineIndex) {
3289    bool skip_call = false;
3290
3291    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3292
3293    // If create derivative bit is set, check that we've specified a base
3294    // pipeline correctly, and that the base pipeline was created to allow
3295    // derivatives.
3296    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3297        PIPELINE_NODE *pBasePipeline = nullptr;
3298        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3299              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3300            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3301                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3302                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3303        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3304            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3305                skip_call |=
3306                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3307                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3308                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3309            } else {
3310                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3311            }
3312        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3313            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3314        }
3315
3316        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3317            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3318                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3319                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3320        }
3321    }
3322
3323    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3324        if (!my_data->phys_dev_properties.features.independentBlend) {
3325            if (pPipeline->attachments.size() > 1) {
3326                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3327                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3328                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3329                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3330                    // only attachment state, so memcmp is best suited for the comparison
3331                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3332                               sizeof(pAttachments[0]))) {
3333                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3334                                             __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3335                                             "Invalid Pipeline CreateInfo: If independent blend feature not "
3336                                             "enabled, all elements of pAttachments must be identical");
3337                        break;
3338                    }
3339                }
3340            }
3341        }
3342        if (!my_data->phys_dev_properties.features.logicOp &&
3343            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3344            skip_call |=
3345                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3346                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3347                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE");
3348        }
3349        if ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable == VK_TRUE) &&
3350            ((pPipeline->graphicsPipelineCI.pColorBlendState->logicOp < VK_LOGIC_OP_CLEAR) ||
3351             (pPipeline->graphicsPipelineCI.pColorBlendState->logicOp > VK_LOGIC_OP_SET))) {
3352            skip_call |=
3353                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3354                        DRAWSTATE_INVALID_LOGIC_OP, "DS",
3355                        "Invalid Pipeline CreateInfo: If logicOpEnable is VK_TRUE, logicOp must be a valid VkLogicOp value");
3356        }
3357    }
3358
3359    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3360    // produces nonsense errors that confuse users. Other layers should already
3361    // emit errors for renderpass being invalid.
3362    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3363    if (renderPass &&
3364        pPipeline->graphicsPipelineCI.subpass >= renderPass->pCreateInfo->subpassCount) {
3365        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3366                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3367                                                                            "is out of range for this renderpass (0..%u)",
3368                             pPipeline->graphicsPipelineCI.subpass, renderPass->pCreateInfo->subpassCount - 1);
3369    }
3370
3371    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->phys_dev_properties.features,
3372                                                    my_data->shaderModuleMap)) {
3373        skip_call = true;
3374    }
3375    // Each shader's stage must be unique
3376    if (pPipeline->duplicate_shaders) {
3377        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3378            if (pPipeline->duplicate_shaders & stage) {
3379                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3380                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3381                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3382                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3383            }
3384        }
3385    }
3386    // VS is required
3387    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3388        skip_call |=
3389            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3390                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vtx Shader required");
3391    }
3392    // Either both or neither TC/TE shaders should be defined
3393    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3394        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3395        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3396                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3397                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3398    }
3399    // Compute shaders should be specified independent of Gfx shaders
3400    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3401        (pPipeline->active_shaders &
3402         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3403          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3404        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3405                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3406                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3407    }
3408    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3409    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3410    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3411        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3412         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3413        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3414                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3415                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3416                                                                            "topology for tessellation pipelines");
3417    }
3418    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3419        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3420        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3421            skip_call |=
3422                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3423                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3424                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3425                                                                       "topology is only valid for tessellation pipelines");
3426        }
3427        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3428            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3429                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3430                                 "Invalid Pipeline CreateInfo State: "
3431                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3432                                 "topology used. pTessellationState must not be NULL in this case.");
3433        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3434                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints > 32)) {
3435            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3436                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3437                                                                                "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3438                                                                                "topology used with patchControlPoints value %u."
3439                                                                                " patchControlPoints should be >0 and <=32.",
3440                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3441        }
3442    }
3443    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3444    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3445        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3446            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3447                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3448        }
3449    }
3450    // Viewport state must be included if rasterization is enabled.
3451    // If the viewport state is included, the viewport and scissor counts should always match.
3452    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3453    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3454        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3455        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3456            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3457                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if viewport "
3458                                                                            "and scissors are dynamic, the PSO must include "
3459                                                                            "viewportCount and scissorCount in pViewportState.");
3460        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3461                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3462            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3463                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3464                                 "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3465                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3466                                 pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3467        } else {
3468            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3469            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3470            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3471            if (!dynViewport) {
3472                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3473                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3474                    skip_call |=
3475                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3476                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3477                                "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3478                                "must either include pViewports data, or include viewport in pDynamicState and set it with "
3479                                "vkCmdSetViewport().",
3480                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3481                }
3482            }
3483            if (!dynScissor) {
3484                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3485                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3486                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3487                                         __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3488                                         "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3489                                         "must either include pScissors data, or include scissor in pDynamicState and set it with "
3490                                         "vkCmdSetScissor().",
3491                                         pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3492                }
3493            }
3494        }
3495
3496        // If rasterization is not disabled, and subpass uses a depth/stencil
3497        // attachment, pDepthStencilState must be a pointer to a valid structure
3498        auto subpass_desc = renderPass ? &renderPass->pCreateInfo->pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
3499        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
3500            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3501            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
3502                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
3503                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3504                                     "Invalid Pipeline CreateInfo State: "
3505                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
3506                                     "depth/stencil attachment");
3507            }
3508        }
3509    }
3510    return skip_call;
3511}
3512
3513// Free the Pipeline nodes
3514static void deletePipelines(layer_data *my_data) {
3515     if (my_data->pipelineMap.empty())
3516        return;
3517    for (auto &pipe_map_pair : my_data->pipelineMap) {
3518        delete pipe_map_pair.second;
3519    }
3520    my_data->pipelineMap.clear();
3521}
3522
3523// Block of code at start here specifically for managing/tracking DSs
3524
3525// Return Pool node ptr for specified pool or else NULL
3526DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3527    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3528    if (pool_it == dev_data->descriptorPoolMap.end()) {
3529        return NULL;
3530    }
3531    return pool_it->second;
3532}
3533
3534 // Return false if the update struct is of a valid type; otherwise flag an error and return the callback's skip value
3535static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3536    switch (pUpdateStruct->sType) {
3537    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3538    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3539        return false;
3540    default:
3541        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3542                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3543                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3544                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3545    }
3546}
3547
3548 // Return the descriptor count for the given update struct
3549static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3550    switch (pUpdateStruct->sType) {
3551    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3552        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3553    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3554        // TODO : Need to understand this case better and make sure code is correct
3555        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3556    default:
3557        return 0;
3558    }
3559}
3560
3561// For given layout and update, return the first overall index of the layout that is updated
3562static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3563                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3564    return binding_start_index + arrayIndex;
3565}
3566// For given layout and update, return the last overall index of the layout that is updated
3567static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3568                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3569    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3570    return binding_start_index + arrayIndex + count - 1;
3571}
3572// Verify that the descriptor type in the update struct matches what's expected by the layout
3573static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3574                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3575    // First get actual type of update
3576    bool skip_call = false;
3577    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3578    switch (pUpdateStruct->sType) {
3579    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3580        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3581        break;
3582    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3583        /* no need to validate */
3584        return false;
3585        break;
3586    default:
3587        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3588                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3589                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3590                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3591    }
3592    if (!skip_call) {
3593        if (layout_type != actualType) {
3594            skip_call |= log_msg(
3595                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3596                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3597                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3598                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3599        }
3600    }
3601    return skip_call;
3602}
3603 // TODO: Consolidate functions
3604bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3605    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3606    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3607        return false;
3608    }
3609    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3610    imgpair.subresource.aspectMask = aspectMask;
3611    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3612    if (imgsubIt == pCB->imageLayoutMap.end()) {
3613        return false;
3614    }
3615    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3616        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3617                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3618                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3619                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3620    }
3621    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3622        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3623                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3624                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3625                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3626    }
3627    node = imgsubIt->second;
3628    return true;
3629}
3630
3631bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3632    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3633        return false;
3634    }
3635    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3636    imgpair.subresource.aspectMask = aspectMask;
3637    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3638    if (imgsubIt == my_data->imageLayoutMap.end()) {
3639        return false;
3640    }
3641    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3642        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3643                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3644                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3645                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3646    }
3647    layout = imgsubIt->second.layout;
3648    return true;
3649}
3650
3651// find layout(s) on the cmd buf level
3652bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3653    ImageSubresourcePair imgpair = {image, true, range};
3654    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3655    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3656    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3657    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3658    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3659    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3660        imgpair = {image, false, VkImageSubresource()};
3661        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3662        if (imgsubIt == pCB->imageLayoutMap.end())
3663            return false;
3664        node = imgsubIt->second;
3665    }
3666    return true;
3667}
3668
3669// find layout(s) on the global level
3670bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3671    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3672    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3673    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3674    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3675    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3676    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3677        imgpair = {imgpair.image, false, VkImageSubresource()};
3678        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3679        if (imgsubIt == my_data->imageLayoutMap.end())
3680            return false;
3681        layout = imgsubIt->second.layout;
3682    }
3683    return true;
3684}
3685
3686bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3687    ImageSubresourcePair imgpair = {image, true, range};
3688    return FindLayout(my_data, imgpair, layout);
3689}
3690
3691bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3692    auto sub_data = my_data->imageSubresourceMap.find(image);
3693    if (sub_data == my_data->imageSubresourceMap.end())
3694        return false;
3695    auto img_node = getImageNode(my_data, image);
3696    if (!img_node)
3697        return false;
3698    bool ignoreGlobal = false;
3699     // TODO: Make this robust for >1 aspect mask. For now, potential errors
3700     // in this case are simply ignored.
3701    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3702        ignoreGlobal = true;
3703    }
3704    for (auto imgsubpair : sub_data->second) {
3705        if (ignoreGlobal && !imgsubpair.hasSubresource)
3706            continue;
3707        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3708        if (img_data != my_data->imageLayoutMap.end()) {
3709            layouts.push_back(img_data->second.layout);
3710        }
3711    }
3712    return true;
3713}
3714
3715// Set the layout on the global level
3716void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3717    VkImage &image = imgpair.image;
3718    // TODO (mlentine): Maybe set format if new? Not used atm.
3719    my_data->imageLayoutMap[imgpair].layout = layout;
3720    // TODO (mlentine): Maybe make vector a set?
3721    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3722    if (subresource == my_data->imageSubresourceMap[image].end()) {
3723        my_data->imageSubresourceMap[image].push_back(imgpair);
3724    }
3725}
3726
3727// Set the layout on the cmdbuf level
3728void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3729    pCB->imageLayoutMap[imgpair] = node;
3730    // TODO (mlentine): Maybe make vector a set?
3731    auto subresource =
3732        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3733    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3734        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3735    }
3736}
3737
3738void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3739    // TODO (mlentine): Maybe make vector a set?
3740    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3741        pCB->imageSubresourceMap[imgpair.image].end()) {
3742        pCB->imageLayoutMap[imgpair].layout = layout;
3743    } else {
3744        // TODO (mlentine): Could be expensive and might need to be removed.
3745        assert(imgpair.hasSubresource);
3746        IMAGE_CMD_BUF_LAYOUT_NODE node;
3747        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3748            node.initialLayout = layout;
3749        }
3750        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3751    }
3752}
3753
3754template <class OBJECT, class LAYOUT>
3755void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3756    if (imgpair.subresource.aspectMask & aspectMask) {
3757        imgpair.subresource.aspectMask = aspectMask;
3758        SetLayout(pObject, imgpair, layout);
3759    }
3760}
3761
3762template <class OBJECT, class LAYOUT>
3763void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3764    ImageSubresourcePair imgpair = {image, true, range};
3765    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3766    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3767    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3768    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3769}
3770
3771template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3772    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3773     SetLayout(pObject, imgpair, layout); // imgpair already carries the image handle
3774}
3775
3776void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3777    auto view_state = getImageViewState(dev_data, imageView);
3778    assert(view_state);
3779    auto image = view_state->create_info.image;
3780    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
3781    // TODO: Do not iterate over every possibility - consolidate where possible
3782    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3783        uint32_t level = subRange.baseMipLevel + j;
3784        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3785            uint32_t layer = subRange.baseArrayLayer + k;
3786            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3787            // TODO: If ImageView was created with depth or stencil, transition both layouts as
3788            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
3789            // is OK for descriptor set layout validation
3790            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3791                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
3792                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
3793                }
3794            }
3795            SetLayout(pCB, image, sub, layout);
3796        }
3797    }
3798}
3799
3800// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3801// func_str is the name of the calling function
3802// Return false if no errors occur
3803// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3804static bool validateIdleDescriptorSet(const layer_data *my_data, VkDescriptorSet set, std::string func_str) {
3805    bool skip_call = false;
3806    auto set_node = my_data->setMap.find(set);
3807    if (set_node == my_data->setMap.end()) {
3808        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3809                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3810                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3811                             (uint64_t)(set));
3812    } else {
3813        if (set_node->second->in_use.load()) {
3814            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
3815                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)(set), __LINE__, DRAWSTATE_OBJECT_INUSE,
3816                                 "DS", "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer.",
3817                                 func_str.c_str(), (uint64_t)(set));
3818        }
3819    }
3820    return skip_call;
3821}
3822
3823// Remove set from setMap and delete the set
3824static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3825    dev_data->setMap.erase(descriptor_set->GetSet());
3826    delete descriptor_set;
3827}
3828// Free all DS Pools including their Sets & related sub-structs
3829// NOTE : Calls to this function should be wrapped in mutex
3830static void deletePools(layer_data *my_data) {
3831     if (my_data->descriptorPoolMap.empty())
3832        return;
3833    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3834        // Remove this pools' sets from setMap and delete them
3835        for (auto ds : (*ii).second->sets) {
3836            freeDescriptorSet(my_data, ds);
3837        }
3838        (*ii).second->sets.clear();
3839    }
3840    my_data->descriptorPoolMap.clear();
3841}
3842
3843static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3844                                VkDescriptorPoolResetFlags flags) {
3845     DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
    if (!pPool)
        return; // Guard: getPoolNode() returns NULL for an unknown pool handle
3846    // TODO: validate flags
3847    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3848    for (auto ds : pPool->sets) {
3849        freeDescriptorSet(my_data, ds);
3850    }
3851    pPool->sets.clear();
3852    // Reset available count for each type and available sets for this pool
3853    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3854        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3855    }
3856    pPool->availableSets = pPool->maxSets;
3857}
3858
3859// For given CB object, fetch associated CB Node from map
3860static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3861    auto it = my_data->commandBufferMap.find(cb);
3862    if (it == my_data->commandBufferMap.end()) {
3863        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3864                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3865                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3866        return NULL;
3867    }
3868    return it->second;
3869}
3870// Free all CB Nodes
3871// NOTE : Calls to this function should be wrapped in mutex
3872static void deleteCommandBuffers(layer_data *my_data) {
3873    if (my_data->commandBufferMap.empty()) {
3874        return;
3875    }
3876    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3877        delete (*ii).second;
3878    }
3879    my_data->commandBufferMap.clear();
3880}
3881
3882static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3883    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3884                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3885                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3886}
3887
3888bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3889    if (!pCB->activeRenderPass)
3890        return false;
3891    bool skip_call = false;
3892    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3893        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3894        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3895                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3896                             "Commands cannot be called in a subpass using secondary command buffers.");
3897    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3898        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3899                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3900                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3901    }
3902    return skip_call;
3903}
3904
3905static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3906    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3907        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3908                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3909                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3910    return false;
3911}
3912
3913static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3914    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3915        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3916                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3917                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3918    return false;
3919}
3920
3921static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3922    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3923        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3924                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3925                        "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
3926    return false;
3927}
3928
3929// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3930//  in the recording state or if there's an issue with the Cmd ordering
3931static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3932    bool skip_call = false;
3933    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3934    if (pPool) {
3935        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3936        switch (cmd) {
3937        case CMD_BINDPIPELINE:
3938        case CMD_BINDPIPELINEDELTA:
3939        case CMD_BINDDESCRIPTORSETS:
3940        case CMD_FILLBUFFER:
3941        case CMD_CLEARCOLORIMAGE:
3942        case CMD_SETEVENT:
3943        case CMD_RESETEVENT:
3944        case CMD_WAITEVENTS:
3945        case CMD_BEGINQUERY:
3946        case CMD_ENDQUERY:
3947        case CMD_RESETQUERYPOOL:
3948        case CMD_COPYQUERYPOOLRESULTS:
3949        case CMD_WRITETIMESTAMP:
3950            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3951            break;
3952        case CMD_SETVIEWPORTSTATE:
3953        case CMD_SETSCISSORSTATE:
3954        case CMD_SETLINEWIDTHSTATE:
3955        case CMD_SETDEPTHBIASSTATE:
3956        case CMD_SETBLENDSTATE:
3957        case CMD_SETDEPTHBOUNDSSTATE:
3958        case CMD_SETSTENCILREADMASKSTATE:
3959        case CMD_SETSTENCILWRITEMASKSTATE:
3960        case CMD_SETSTENCILREFERENCESTATE:
3961        case CMD_BINDINDEXBUFFER:
3962        case CMD_BINDVERTEXBUFFER:
3963        case CMD_DRAW:
3964        case CMD_DRAWINDEXED:
3965        case CMD_DRAWINDIRECT:
3966        case CMD_DRAWINDEXEDINDIRECT:
3967        case CMD_BLITIMAGE:
3968        case CMD_CLEARATTACHMENTS:
3969        case CMD_CLEARDEPTHSTENCILIMAGE:
3970        case CMD_RESOLVEIMAGE:
3971        case CMD_BEGINRENDERPASS:
3972        case CMD_NEXTSUBPASS:
3973        case CMD_ENDRENDERPASS:
3974            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
3975            break;
3976        case CMD_DISPATCH:
3977        case CMD_DISPATCHINDIRECT:
3978            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
3979            break;
3980        case CMD_COPYBUFFER:
3981        case CMD_COPYIMAGE:
3982        case CMD_COPYBUFFERTOIMAGE:
3983        case CMD_COPYIMAGETOBUFFER:
3984        case CMD_CLONEIMAGEDATA:
3985        case CMD_UPDATEBUFFER:
3986        case CMD_PIPELINEBARRIER:
3987        case CMD_EXECUTECOMMANDS:
3988        case CMD_END:
3989            break;
3990        default:
3991            break;
3992        }
3993    }
3994    if (pCB->state != CB_RECORDING) {
3995        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
3996    } else {
3997        skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
3998        CMD_NODE cmdNode = {};
3999         // init cmd node and append it to the end of the CB's command list
4000        cmdNode.cmdNumber = ++pCB->numCmds;
4001        cmdNode.type = cmd;
4002        pCB->cmds.push_back(cmdNode);
4003    }
4004    return skip_call;
4005}
4006// For given object struct return a ptr of BASE_NODE type for its wrapping struct
4007BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
4008    BASE_NODE *base_ptr = nullptr;
4009    switch (object_struct.type) {
4010    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4011        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
4012        break;
4013    }
4014    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4015        base_ptr = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
4016        break;
4017    }
4018    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4019        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
4020        break;
4021    }
4022    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4023        base_ptr = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
4024        break;
4025    }
4026    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4027        base_ptr = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
4028        break;
4029    }
4030    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4031        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
4032        break;
4033    }
4034    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4035        base_ptr = getImageNode(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
4036        break;
4037    }
4038    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4039        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
4040        break;
4041    }
4042    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4043        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
4044        break;
4045    }
4046    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4047        base_ptr = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
4048        break;
4049    }
4050    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4051        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
4052        break;
4053    }
4054    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4055        base_ptr = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
4056        break;
4057    }
4058    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4059        base_ptr = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
4060        break;
4061    }
4062    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4063        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
4064        break;
4065    }
4066    default:
4067        // TODO : Any other objects to be handled here?
4068        assert(0);
4069        break;
4070    }
4071    return base_ptr;
4072}
4073
4074// Tie the VK_OBJECT to the cmd buffer which includes:
4075//  Add object_binding to cmd buffer
4076//  Add cb_binding to object
4077static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
4078    cb_bindings->insert(cb_node);
4079    cb_node->object_bindings.insert(obj);
4080}
4081 // For a given object, if cb_node is in that object's cb_bindings, remove cb_node
4082static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
4083    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
4084    if (base_obj)
4085        base_obj->cb_bindings.erase(cb_node);
4086}
4087// Reset the command buffer state
4088//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4089static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
4090    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
4091    if (pCB) {
4092        pCB->in_use.store(0);
4093        pCB->cmds.clear();
4094        // Reset CB state (note that createInfo is not cleared)
4095        pCB->commandBuffer = cb;
4096        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4097        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4098        pCB->numCmds = 0;
4099        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4100        pCB->state = CB_NEW;
4101        pCB->submitCount = 0;
4102        pCB->status = 0;
4103        pCB->viewportMask = 0;
4104        pCB->scissorMask = 0;
4105
4106        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4107            pCB->lastBound[i].reset();
4108        }
4109
4110        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4111        pCB->activeRenderPass = nullptr;
4112        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4113        pCB->activeSubpass = 0;
4114        pCB->broken_bindings.clear();
4115        pCB->waitedEvents.clear();
4116        pCB->events.clear();
4117        pCB->writeEventsBeforeWait.clear();
4118        pCB->waitedEventsBeforeQueryReset.clear();
4119        pCB->queryToStateMap.clear();
4120        pCB->activeQueries.clear();
4121        pCB->startedQueries.clear();
4122        pCB->imageSubresourceMap.clear();
4123        pCB->imageLayoutMap.clear();
4124        pCB->eventToStageMap.clear();
4125        pCB->drawData.clear();
4126        pCB->currentDrawData.buffers.clear();
4127        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4128        // Make sure any secondaryCommandBuffers are removed from globalInFlight
4129        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
4130            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
4131        }
4132        pCB->secondaryCommandBuffers.clear();
4133        pCB->updateImages.clear();
4134        pCB->updateBuffers.clear();
4135        clear_cmd_buf_and_mem_references(dev_data, pCB);
4136        pCB->eventUpdates.clear();
4137        pCB->queryUpdates.clear();
4138
4139        // Remove object bindings
4140        for (auto obj : pCB->object_bindings) {
4141            removeCommandBufferBinding(dev_data, &obj, pCB);
4142        }
4143        pCB->object_bindings.clear();
4144        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
4145        for (auto framebuffer : pCB->framebuffers) {
4146            auto fb_node = getFramebuffer(dev_data, framebuffer);
4147            if (fb_node)
4148                fb_node->cb_bindings.erase(pCB);
4149        }
4150        pCB->framebuffers.clear();
4151        pCB->activeFramebuffer = VK_NULL_HANDLE;
4152    }
4153}
4154
4155// Set PSO-related status bits for CB, including dynamic state set via PSO
4156static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4157    // Account for any dynamic state not set via this PSO
4158    if (!pPipe->graphicsPipelineCI.pDynamicState ||
4159        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
4160        pCB->status |= CBSTATUS_ALL;
4161    } else {
4162        // First consider all state on
4163        // Then unset any state that's noted as dynamic in PSO
4164        // Finally OR that into CB statemask
4165        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4166        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
4167            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
4168            case VK_DYNAMIC_STATE_LINE_WIDTH:
4169                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4170                break;
4171            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4172                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4173                break;
4174            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4175                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4176                break;
4177            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4178                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4179                break;
4180            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4181                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4182                break;
4183            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4184                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4185                break;
4186            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4187                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4188                break;
4189            default:
4190                // TODO : Flag error here
4191                break;
4192            }
4193        }
4194        pCB->status |= psoDynStateMask;
4195    }
4196}
4197
4198// Print the last bound Gfx Pipeline
4199static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4200    bool skip_call = false;
4201    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4202    if (pCB) {
4203        PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
4204        if (!pPipeTrav) {
4205            // nothing to print
4206        } else {
4207            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4208                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
4209                                 vk_print_vkgraphicspipelinecreateinfo(
4210                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
4211                                     .c_str());
4212        }
4213    }
4214    return skip_call;
4215}
4216
4217static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4218    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4219     if (pCB && !pCB->cmds.empty()) {
4220        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4221                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
4222        vector<CMD_NODE> cmds = pCB->cmds;
4223        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4224            // TODO : Need to pass cb as srcObj here
4225            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4226                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4227        }
4228    } else {
4229        // Nothing to print
4230    }
4231}
4232
4233static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4234    bool skip_call = false;
4235    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4236        return skip_call;
4237    }
4238    skip_call |= printPipeline(my_data, cb);
4239    return skip_call;
4240}
4241
4242// Flags validation error if the associated call is made inside a render pass. The apiName
4243// routine should ONLY be called outside a render pass.
4244static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4245    bool inside = false;
4246    if (pCB->activeRenderPass) {
4247        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4248                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4249                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
4250                         (uint64_t)pCB->activeRenderPass->renderPass);
4251    }
4252    return inside;
4253}
4254
4255// Flags validation error if the associated call is made outside a render pass. The apiName
4256// routine should ONLY be called inside a render pass.
4257static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4258    bool outside = false;
4259    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4260        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4261         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4262        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4263                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4264                          "%s: This call must be issued inside an active render pass.", apiName);
4265    }
4266    return outside;
4267}
4268
4269static void init_core_validation(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
4271     layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
4273}
4274
4275VKAPI_ATTR VkResult VKAPI_CALL
4276CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4277    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4278
4279    assert(chain_info->u.pLayerInfo);
4280    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4281    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4282    if (fpCreateInstance == NULL)
4283        return VK_ERROR_INITIALIZATION_FAILED;
4284
4285    // Advance the link info for the next element on the chain
4286    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4287
4288    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4289    if (result != VK_SUCCESS)
4290        return result;
4291
4292    layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
4293    instance_data->instance = *pInstance;
4294    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
4295    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
4296
4297    instance_data->report_data =
4298        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
4299                                     pCreateInfo->ppEnabledExtensionNames);
4300    init_core_validation(instance_data, pAllocator);
4301
4302    instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
4303    ValidateLayerOrdering(*pCreateInfo);
4304
4305    return result;
4306}
4307
4308/* hook DestroyInstance to remove tableInstanceMap entry */
4309VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4310    // TODOSC : Shouldn't need any customization here
4311    dispatch_key key = get_dispatch_key(instance);
4312    // TBD: Need any locking this early, in case this function is called at the
4313    // same time by more than one thread?
4314    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
4315    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
4316    pTable->DestroyInstance(instance, pAllocator);
4317
4318    std::lock_guard<std::mutex> lock(global_lock);
4319    // Clean up logging callback, if any
4320    while (my_data->logging_callback.size() > 0) {
4321        VkDebugReportCallbackEXT callback = my_data->logging_callback.back();
4322        layer_destroy_msg_callback(my_data->report_data, callback, pAllocator);
4323        my_data->logging_callback.pop_back();
4324    }
4325
4326    layer_debug_report_destroy_instance(my_data->report_data);
4327    delete my_data->instance_dispatch_table;
4328    layer_data_map.erase(key);
4329}
4330
4331static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4332    uint32_t i;
4333    // TBD: Need any locking, in case this function is called at the same time
4334    // by more than one thread?
4335    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4336    dev_data->device_extensions.wsi_enabled = false;
4337    dev_data->device_extensions.wsi_display_swapchain_enabled = false;
4338
4339    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4340        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4341            dev_data->device_extensions.wsi_enabled = true;
4342        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
4343            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
4344    }
4345}
4346
4347// Verify that queue family has been properly requested
4348bool ValidateRequestedQueueFamilyProperties(layer_data *dev_data, const VkDeviceCreateInfo *create_info) {
4349    bool skip_call = false;
4350     // First check if the app has actually requested queueFamilyProperties
4351    if (!dev_data->physical_device_state) {
4352        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4353                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
4354                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
4355    } else if (QUERY_DETAILS != dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
4356        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
4357        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
4358                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4359                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4360    } else {
4361        // Check that the requested queue properties are valid
4362        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4363            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4364            if (dev_data->queue_family_properties.size() <=
4365                requestedIndex) { // requested index is out of bounds for this physical device
4366                skip_call |= log_msg(
4367                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4368                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4369                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4370            } else if (create_info->pQueueCreateInfos[i].queueCount >
4371                       dev_data->queue_family_properties[requestedIndex]->queueCount) {
4372                skip_call |=
4373                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4374                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4375                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4376                            "requested queueCount is %u.",
4377                            requestedIndex, dev_data->queue_family_properties[requestedIndex]->queueCount,
4378                            create_info->pQueueCreateInfos[i].queueCount);
4379            }
4380        }
4381    }
4382    return skip_call;
4383}

// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(layer_data *dev_data, const VkPhysicalDeviceFeatures *requested_features) {
    bool skip_call = false;

    VkBool32 *actual = reinterpret_cast<VkBool32 *>(&(dev_data->physical_device_features));
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues.
    //  We need to provide the struct member name with the issue. To do that, it seems we'll have to
    //  loop through each struct member, which should be done with codegen to stay in sync.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            // TODO: Add an index-to-struct-member-name helper to be able to include a feature name
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                "which is not available on this device.",
                i);
            errors++;
        }
    }
    if (errors && (UNCALLED == dev_data->physical_device_state->vkGetPhysicalDeviceFeaturesState)) {
        // If the user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                             "DL", "You requested features that are unavailable on this device. You should first query feature "
                                   "availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip_call;
}
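
// Illustrative sketch (not part of the layer; names hypothetical): the pattern this check
// encourages is query-then-enable, so that only supported features are requested:
//
//     VkPhysicalDeviceFeatures supported;
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     if (supported.samplerAnisotropy)
//         enabled.samplerAnisotropy = VK_TRUE; // enable only what the device reports
//     // ...then point VkDeviceCreateInfo::pEnabledFeatures at 'enabled'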

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    bool skip_call = false;

    // Check that any requested features are available
    if (pCreateInfo->pEnabledFeatures) {
        skip_call |= ValidateRequestedFeatures(my_instance_data, pCreateInfo->pEnabledFeatures);
    }
    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, pCreateInfo);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    // Setup device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
    uint32_t count;
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->phys_dev_properties.features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->phys_dev_properties.features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device mem limits into device layer_data struct
    my_instance_data->instance_dispatch_table->GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// Forward declaration
static void deleteRenderPasses(layer_data *);
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    std::unique_lock<std::mutex> lock(global_lock);
    deletePipelines(dev_data);
    deleteRenderPasses(dev_data);
    deleteCommandBuffers(dev_data);
    // This will also delete all sets in the pool & remove them from setMap
    deletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
        delete del_layout.second;
    }
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until the device is destroyed
    dev_data->queueMap.clear();
    lock.unlock();
#if MTMERGESOURCE
    bool skip_call = false;
    lock.lock();
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
    print_mem_list(dev_data);
    printCBList(dev_data);
    // Report any memory leaks
    DEVICE_MEM_INFO *pInfo = NULL;
    if (!dev_data->memObjMap.empty()) {
        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
            pInfo = (*ii).second.get();
            if (pInfo->alloc_info.allocationSize != 0) {
                // Valid Usage: All child objects created on the device must have been destroyed prior to destroying the device
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
                            "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
                            "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
            }
        }
    }
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif
    VkLayerDispatchTable *pDisp = dev_data->device_dispatch_table;
    if (!skip_call) {
        pDisp->DestroyDevice(device, pAllocator);
    }
#else
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
#endif
    delete dev_data->device_dispatch_table;
    layer_data_map.erase(key);
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// This validates that the initial layout specified in the command buffer for the IMAGE
// is the same as the global IMAGE layout
static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
                        "with layout %s when first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
                        cb_image_data.first.subresource.arrayLayer, cb_image_data.first.subresource.mipLevel,
                        string_VkImageLayout(imageLayout), string_VkImageLayout(cb_image_data.second.initialLayout));
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
                        "first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                }
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}
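
// Illustrative sketch (not part of the layer; scenario hypothetical): the mismatch flagged above
// arises when a command buffer was recorded against one layout but the image is in another at
// submit time. For example, if a recorded barrier expects 'image' to start in
// VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, but an intervening submission left the global tracked
// layout at VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, re-submitting that command buffer triggers
// DRAWSTATE_INVALID_IMAGE_LAYOUT.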

// Loop through bound objects and increment their in_use counts.
//  For any unknown objects, flag an error.
static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    bool skip = false;
    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
            base_obj = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
            error_code = DRAWSTATE_INVALID_SAMPLER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_QUERY_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
            base_obj = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
            error_code = DRAWSTATE_INVALID_PIPELINE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            base_obj = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            base_obj = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
            error_code = DRAWSTATE_INVALID_EVENT;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
            base_obj = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
            base_obj = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
            base_obj = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
            error_code = DRAWSTATE_INVALID_RENDERPASS;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
            break;
        }
        default:
            // TODO : Merge handling of other object types into this code
            break;
        }
        if (!base_obj) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
        } else {
            base_obj->in_use.fetch_add(1);
        }
    }
    return skip;
}

// Track which resources are in-flight by atomically incrementing their "in_use" count
static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip_call = false;

    cb_node->in_use.fetch_add(1);
    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);

    // First increment for all "generic" objects bound to the cmd buffer, followed by special-case objects below
    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state, and the CB_INVALID state
    //  is then flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_node = getBufferNode(dev_data, buffer);
            if (!buffer_node) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
            } else {
                buffer_node->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_node = getEventNode(dev_data, event);
        if (event_node)
            event_node->write_in_use++;
    }
    return skip_call;
}

// Note: This function assumes that the global lock is held by the calling thread.
// TODO: untangle this.
static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
    bool skip_call = false;
    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
    if (pCB) {
        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
            for (auto event : queryEventsPair.second) {
                if (my_data->eventMap[event].needsSignaled) {
                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64
                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                }
            }
        }
    }
    return skip_call;
}

// TODO: nuke this completely.
// Decrement cmd_buffer in_use and, if it goes to 0, remove cmd_buffer from globalInFlightCmdBuffers
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    pCB->in_use.fetch_sub(1);
    if (!pCB->in_use.load()) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}

static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq) {
    bool skip_call = false; // TODO: extract everything that might fail into a precheck
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
            pSemaphore->in_use.fetch_sub(1);
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            pSemaphore->in_use.fetch_sub(1);
        }

        for (auto cb : submission.cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_node = getBufferNode(dev_data, buffer);
                    if (buffer_node) {
                        buffer_node->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
            removeInFlightCmdBuffer(dev_data, cb);
        }

        auto pFence = getFenceNode(dev_data, submission.fence);
        if (pFence) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
    }

    return skip_call;
}
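
// Worked example (a sketch; values hypothetical): if a queue has seq == 5 with three pending
// submissions, those submissions occupy seqs 6, 7, and 8. Calling RetireWorkOnQueue(..., 7)
// pops and retires the first two submissions and leaves seq == 7, so the submission at seq 8
// is still considered in flight. Any waits the retired submissions performed on semaphores
// signaled by other queues roll those queues forward the same way, via otherQueueSeqs.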

// Submit a fence to a queue, delimiting previous fences and previous untracked work by it.
static void SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
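
// Worked example (a sketch; values hypothetical): with pQueue->seq == 10, two submissions
// already pending (seqs 11 and 12), and a vkQueueSubmit of submitCount == 3 batches carrying
// this fence, the fence's signaler seq is 10 + 2 + 3 == 15: the fence is considered signaled
// once the last of the three new batches retires.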

static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
    }
    return skip_call;
}

static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    // Validate that a ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             0, __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                             "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                             "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                             (uint64_t)(pCB->commandBuffer), pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason the CB is invalid
            for (auto obj : pCB->broken_bindings) {
                const char *type_str = object_type_to_string(obj.type);
                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
                const char *cause_str =
                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";

                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
                            " was %s.",
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                        "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to vkQueueSubmit()!",
                        (uint64_t)(pCB->commandBuffer));
        }
    }
    return skip_call;
}

// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto queue_node = getQueueNode(dev_data, queue);

    if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
            "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
            " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
            reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
            reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
    }

    return skip_call;
}

static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip_call = false;

    // If USAGE_SIMULTANEOUS_USE_BIT is not set, then the CB cannot already be executing on the device
    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);

    skip_call |= validateAndIncrementResources(dev_data, pCB);

    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
                        " but that buffer has subsequently been bound to "
                        "primary cmd buffer 0x%" PRIxLEAST64
                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
            }
        }
    }

    skip_call |= validateCommandBufferState(dev_data, pCB);

    return skip_call;
}

static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skip_call = false;

    if (pFence) {
        if (pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted.",
                        reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skip_call;
}
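
// Illustrative sketch (not part of the layer; names hypothetical): reusing a fence requires an
// explicit reset between submissions, which returns it to the unsignaled state this check expects:
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // fence becomes FENCE_RETIRED here
//     vkResetFences(device, 1, &fence);                        // back to unsignaled; safe to reuse
//     vkQueueSubmit(queue, 1, &submit_info, fence);            // no DRAWSTATE/MEMTRACK complaint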

VKAPI_ATTR VkResult VKAPI_CALL
QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueNode(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // TODO : Review these old print functions and clean up as appropriate
    print_mem_list(dev_data);
    printCBList(dev_data);

    // Mark the fence in-use.
    if (pFence) {
        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
    }

    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<SEMAPHORE_WAIT> semaphore_waits;
        vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
                } else {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                }
            }
        }

        std::vector<VkCommandBuffer> cbs;

        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
            skip_call |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
            if (pCBNode) {
                cbs.push_back(submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
                    cbs.push_back(secondaryCmdBuffer);
                }

                pCBNode->submitCount++; // increment submit count
                skip_call |= validatePrimaryCommandBufferState(dev_data, pCBNode);
                skip_call |= validateQueueFamilyIndices(dev_data, pCBNode, queue);
                // Potential early exit here, as bad object state may crash in delayed function calls
                if (skip_call)
                    return result;
                // Call submit-time functions to validate/update state
                for (auto &function : pCBNode->validate_functions) {
                    skip_call |= function();
                }
                for (auto &function : pCBNode->eventUpdates) {
                    skip_call |= function(queue);
                }
                for (auto &function : pCBNode->queryUpdates) {
                    skip_call |= function(queue);
                }
            }
        }

        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !submitCount) {
        // If there are no submissions, but just a fence dropped on the end of the queue,
        // record an empty submission with just the fence, so we can determine its completion.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                         std::vector<VkSemaphore>(), fence);
    }

    lock.unlock();
    if (!skip_call)
        result = dev_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}
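
// Illustrative sketch (not part of the layer; scenario hypothetical): the wait/signal bookkeeping
// above models chains such as acquire -> render -> present. If submit A signals 'sem' and a later
// submit B waits on 'sem', then at B's submission 'sem' is marked unsignaled again and B records
// {sem, signaling queue, seq} in semaphore_waits, so retiring B can also retire work on the
// signaling queue up to that seq via RetireWorkOnQueue.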

VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = my_data->device_dispatch_table->AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
    // TODO : Track allocations and overall size here
    std::lock_guard<std::mutex> lock(global_lock);
    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
    print_mem_list(my_data);
    return result;
}

VKAPI_ATTR void VKAPI_CALL
FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
    // Before freeing a memory object, an application must ensure the memory object is no longer
    // in use by the device—for example by command buffers queued for execution. The memory need
    // not yet be unbound from all images and buffers, but any further use of those images or
    // buffers (on host or device) for anything other than destroying those objects will result in
    // undefined behavior.

    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = freeMemObjInfo(my_data, device, mem, false);
    print_mem_list(my_data);
    printCBList(my_data);
    lock.unlock();
    if (!skip_call) {
        my_data->device_dispatch_table->FreeMemory(device, mem, pAllocator);
    }
}
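
// Illustrative sketch (not part of the layer; names hypothetical): the safe ordering the spec
// text above describes is wait-then-free:
//
//     vkQueueWaitIdle(queue);                    // or wait on a fence covering the last use of 'mem'
//     vkDestroyBuffer(device, buffer, nullptr);  // optional; unbinding first is not required
//     vkFreeMemory(device, mem, nullptr);        // no further device or host use of 'mem' after this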

// Validate that a given map memory range is valid. This means that the memory
//  should not already be mapped, and that the size of the map range should be:
//  1. Not zero
//  2. Within the size of the memory allocation
static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skip_call = false;

    if (size == 0) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                             (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                             "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call vkMapMemory on an object that is already mapped
        if (mem_info->mem_range.size != 0) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                 (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                 "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within the object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                     "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                            " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                     offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
                            size + offset, mem_info->alloc_info.allocationSize);
            }
        }
    }
    return skip_call;
}
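
// Illustrative sketch (not part of the layer; values hypothetical): for an allocation with
// allocationSize == 4096,
//
//     vkMapMemory(device, mem, 0, 4096, 0, &pData);             // OK: whole allocation
//     vkMapMemory(device, mem, 1024, VK_WHOLE_SIZE, 0, &pData); // OK: maps [1024, 4096)
//     vkMapMemory(device, mem, 0, 0, 0, &pData);                // error: zero-size range
//     vkMapMemory(device, mem, 2048, 4096, 0, &pData);          // error: 2048 + 4096 > 4096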

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->mem_range.offset = offset;
        mem_info->mem_range.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skip_call = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->mem_range.size) {
            // Valid Usage: memory must currently be mapped
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
        }
        mem_info->mem_range.size = 0;
        if (mem_info->shadow_copy) {
            free(mem_info->shadow_copy_base);
            mem_info->shadow_copy_base = 0;
            mem_info->shadow_copy = 0;
        }
    }
    return skip_call;
}

// Guard value for pad data
static const char NoncoherentMemoryFillValue = 0xb;

static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                     void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->p_driver_data = *ppData;
        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->shadow_copy = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->alloc_info.allocationSize - offset;
            }
            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
            assert(vk_safe_modulo(mem_info->shadow_pad_size,
                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
            // Ensure start of mapped region reflects hardware alignment constraints
            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;

            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
            uint64_t start_offset = offset % map_alignment;
            // Data passed to the driver will be wrapped by a guardband of data to detect over- or under-writes.
            mem_info->shadow_copy_base = malloc(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset);

            mem_info->shadow_copy =
                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                         ~(map_alignment - 1)) + start_offset;
            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                                  map_alignment) == 0);

            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, 2 * mem_info->shadow_pad_size + size);
            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
        }
    }
}
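
// Memory layout sketch of the shadow copy above (widths hypothetical, not to scale):
//
//     shadow_copy_base
//     |--(alignment waste)--|-- pad (shadow_pad_size) --|-- user data (size) --|-- pad --|
//                           ^ shadow_copy (+ start_offset)
//
// Both pads are filled with NoncoherentMemoryFillValue; the app receives a pointer just past the
// leading pad, so the layer can later check that the guard bytes are intact to detect writes that
// overstep the mapped range.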

// Verify that the state of a fence being waited on is appropriate. That is,
//  a fence being waited on should have been submitted on a queue or during
//  acquire-next-image; otherwise the wait can never complete.
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skip_call = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                 "acquire next image.",
                                 apiCall, reinterpret_cast<uint64_t &>(fence));
        }
    }
    return skip_call;
}

static bool RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = getFenceNode(dev_data, fence);
    if (pFence->signaler.first != VK_NULL_HANDLE) {
        /* The fence signaler is a queue -- use this as proof that prior operations
         * on that queue have completed.
         */
        return RetireWorkOnQueue(dev_data, getQueueNode(dev_data, pFence->signaler.first), pFence->signaler.second);
    } else {
        /* The fence signaler is the WSI. We're not tracking what the WSI op
         * actually /was/ in CV yet, but we need to mark the fence as retired.
         */
        pFence->state = FENCE_RETIRED;
        return false;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; i++) {
        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        // When we know that all fences are complete, we can clean/remove their CBs
        if (waitAll || fenceCount == 1) {
            for (uint32_t i = 0; i < fenceCount; i++) {
                skip_call |= RetireFence(dev_data, pFences[i]);
            }
        }
        // NOTE : The alternate case not handled here is when only some fences have completed.
        //  In that case, for the app to guarantee which fences completed, it will have to call
        //  vkGetFenceStatus(), at which point we'll clean/remove their CBs if complete.
        lock.unlock();
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->GetFenceStatus(device, fence);
    lock.lock();
    if (result == VK_SUCCESS) {
        skip_call |= RetireFence(dev_data, fence);
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_dispatch_table->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->queue = *pQueue;
        pQNode->queueFamilyIndex = queueFamilyIndex;
        pQNode->seq = 0;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pQueue = getQueueNode(dev_data, queue);
    skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->QueueWaitIdle(queue);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto &queue : dev_data->queueMap) {
        skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->DeviceWaitIdle(device);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto fence_pair = dev_data->fenceMap.find(fence);
    if (fence_pair != dev_data->fenceMap.end()) {
        if (fence_pair->second.state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
                                 (uint64_t)(fence));
        }
        dev_data->fenceMap.erase(fence_pair);
    }
    lock.unlock();

    if (!skip_call)
        dev_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
}

// For a given object node, if it is in use, flag a validation error and return the callback result; else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct) {
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
                        DRAWSTATE_OBJECT_INUSE, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer.",
                        object_type_to_string(obj_struct.type), obj_struct.handle);
    }
    return skip;
}
5401
5402VKAPI_ATTR void VKAPI_CALL
5403DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
5404    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5405    bool skip = false;
5406    std::unique_lock<std::mutex> lock(global_lock);
5407    auto sema_node = getSemaphoreNode(dev_data, semaphore);
5408    if (sema_node) {
5409        skip |= ValidateObjectNotInUse(dev_data, sema_node,
5410                                       {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT});
5411    }
5412    if (!skip) {
5413        dev_data->semaphoreMap.erase(semaphore);
5414        lock.unlock();
5415        dev_data->device_dispatch_table->DestroySemaphore(device, semaphore, pAllocator);
5416    }
5417}
5418
5419VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
5420    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5421    bool skip = false;
5422    std::unique_lock<std::mutex> lock(global_lock);
5423    auto event_node = getEventNode(dev_data, event);
5424    if (event_node) {
5425        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
5426        skip |= ValidateObjectNotInUse(dev_data, event_node, obj_struct);
5427        // Any bound cmd buffers are now invalid
5428        invalidateCommandBuffers(event_node->cb_bindings, obj_struct);
5429    }
5430    if (!skip) {
5431        dev_data->eventMap.erase(event);
5432        lock.unlock();
5433        dev_data->device_dispatch_table->DestroyEvent(device, event, pAllocator);
5434    }
5435}
5436
5437VKAPI_ATTR void VKAPI_CALL
5438DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
5439    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5440    bool skip = false;
5441    std::unique_lock<std::mutex> lock(global_lock);
5442    auto qp_node = getQueryPoolNode(dev_data, queryPool);
5443    if (qp_node) {
5444        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
5445        skip |= ValidateObjectNotInUse(dev_data, qp_node, obj_struct);
5446        // Any bound cmd buffers are now invalid
5447        invalidateCommandBuffers(qp_node->cb_bindings, obj_struct);
5448    }
5449    if (!skip) {
5450        dev_data->queryPoolMap.erase(queryPool);
5451        lock.unlock();
5452        dev_data->device_dispatch_table->DestroyQueryPool(device, queryPool, pAllocator);
5453    }
5454}
5455
5456VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
5457                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
5458                                                   VkQueryResultFlags flags) {
5459    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5460    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
5461    std::unique_lock<std::mutex> lock(global_lock);
5462    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
5463        auto pCB = getCBNode(dev_data, cmdBuffer);
5464        for (auto queryStatePair : pCB->queryToStateMap) {
5465            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
5466        }
5467    }
5468    bool skip_call = false;
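    // Classify each requested query against the in-flight set and the known-state map. The branches below
    //  handle: available & in flight, unavailable & in flight, unavailable & not in flight, and never recorded.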
5469    for (uint32_t i = 0; i < queryCount; ++i) {
5470        QueryObject query = {queryPool, firstQuery + i};
5471        auto queryElement = queriesInFlight.find(query);
5472        auto queryToStateElement = dev_data->queryToStateMap.find(query);
5473        if (queryToStateElement != dev_data->queryToStateMap.end()) {
5474            // Available and in flight
            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
5477                for (auto cmdBuffer : queryElement->second) {
5478                    auto pCB = getCBNode(dev_data, cmdBuffer);
5479                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
5480                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
5481                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5482                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5483                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
5484                                             (uint64_t)(queryPool), firstQuery + i);
5485                    } else {
5486                        for (auto event : queryEventElement->second) {
5487                            dev_data->eventMap[event].needsSignaled = true;
5488                        }
5489                    }
5490                }
5491                // Unavailable and in flight
            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
5494                // TODO : Can there be the same query in use by multiple command buffers in flight?
5495                bool make_available = false;
5496                for (auto cmdBuffer : queryElement->second) {
5497                    auto pCB = getCBNode(dev_data, cmdBuffer);
5498                    make_available |= pCB->queryToStateMap[query];
5499                }
5500                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
5501                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5502                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5503                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5504                                         (uint64_t)(queryPool), firstQuery + i);
5505                }
                // Unavailable and not in flight
            } else if (!queryToStateElement->second) {
5508                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5509                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
5510                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
5511                                     (uint64_t)(queryPool), firstQuery + i);
            }
        } else {
            // Uninitialized: no state has ever been recorded for this query
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
5521    }
5522    lock.unlock();
5523    if (skip_call)
5524        return VK_ERROR_VALIDATION_FAILED_EXT;
5525    return dev_data->device_dispatch_table->GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride,
5526                                                                flags);
5527}
5528
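// Verify the given buffer is tracked and not currently in use by a command buffer. Reports a validation error
//  and returns true when the buffer cannot safely be destroyed (see DestroyBuffer below).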
5529static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
5530    bool skip_call = false;
5531    auto buffer_node = getBufferNode(my_data, buffer);
5532    if (!buffer_node) {
5533        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5534                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
5535                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
5536    } else {
5537        if (buffer_node->in_use.load()) {
5538            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
5539                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
5540                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
5541        }
5542    }
5543    return skip_call;
5544}
5545
// Return true if the given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. A violation of that should already have resulted in
//  an error, so it is not re-checked here
// When one range is linear and the other is non-linear, both ranges are padded out to bufferImageGranularity
//  before comparison. If the padded ranges alias, a validation error is reported and *skip_call may be set by
//  the callback, so the caller should merge in the skip_call value whenever the padding case is possible.
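// Worked example (illustrative numbers only): assume bufferImageGranularity is 0x400. A linear range ending at
//  0x3ff and a non-linear range starting at 0x400 round to different 0x400-aligned pages (0x000 vs 0x400), so
//  no intersection is reported. If the linear range instead ends at 0x400, both round to page 0x400 and the
//  padded comparison below reports an intersection, which is then flagged as an aliasing error.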
5552static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
5553    *skip_call = false;
5554    auto r1_start = range1->start;
5555    auto r1_end = range1->end;
5556    auto r2_start = range2->start;
5557    auto r2_end = range2->end;
5558    VkDeviceSize pad_align = 1;
5559    if (range1->linear != range2->linear) {
5560        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
5561    }
5562    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
5563        return false;
5564    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
5565        return false;
5566
5567    if (range1->linear != range2->linear) {
5568        // In linear vs. non-linear case, it's an error to alias
5569        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
5570        const char *r1_type_str = range1->image ? "image" : "buffer";
5571        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
5572        const char *r2_type_str = range2->image ? "image" : "buffer";
5573        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
5574        *skip_call |=
5575            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
5576                    "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
5577                           " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
5578                    r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
5579    }
5580    // Ranges intersect
5581    return true;
5582}
// Simplified overload of rangesIntersect above that checks range1 for intersection with the [offset, end] span
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/end
    MEMORY_RANGE range_wrap;
    // Match the linear setting of range1 so that no padding is applied and the aliasing-error case cannot fire
    range_wrap.linear = range1->linear;
5589    range_wrap.start = offset;
5590    range_wrap.end = end;
5591    bool tmp_bool;
5592    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
5593}
// For the given mem_info, mark as valid all bound ranges that intersect the [offset, end] span
5595// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
5596static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
5597    bool tmp_bool = false;
5598    MEMORY_RANGE map_range;
5599    map_range.linear = true;
5600    map_range.start = offset;
5601    map_range.end = end;
5602    for (auto &handle_range_pair : mem_info->bound_ranges) {
5603        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
5604            // TODO : WARN here if tmp_bool true?
5605            handle_range_pair.second.valid = true;
5606        }
5607    }
5608}
5609// Object with given handle is being bound to memory w/ given mem_info struct.
5610//  Track the newly bound memory range with given memoryOffset
5611//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
5612//  and non-linear range incorrectly overlap.
5613// Return true if an error is flagged and the user callback returns "true", otherwise false
5614// is_image indicates an image object, otherwise handle is for a buffer
5615// is_linear indicates a buffer or linear image
5616static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
5617                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
5618    bool skip_call = false;
5619    MEMORY_RANGE range;
5620
5621    range.image = is_image;
5622    range.handle = handle;
5623    range.linear = is_linear;
5624    range.valid = mem_info->global_valid;
5625    range.memory = mem_info->mem;
5626    range.start = memoryOffset;
5627    range.size = memRequirements.size;
5628    range.end = memoryOffset + memRequirements.size - 1;
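    // Note that range.end is inclusive: it is the address of the last byte covered by this binding, hence the -1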
5629    range.aliases.clear();
5630    // Update Memory aliasing
    // Save aliased ranges so we can copy them into the final map entry below. Can't do it in the loop because we don't yet
    // have the final ptr; if we inserted into the map before the loop to get that ptr, we might enter the loop when not
    // needed and would check the range against itself
5633    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
5634    for (auto &obj_range_pair : mem_info->bound_ranges) {
5635        auto check_range = &obj_range_pair.second;
5636        bool intersection_error = false;
5637        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
5638            skip_call |= intersection_error;
5639            range.aliases.insert(check_range);
5640            tmp_alias_ranges.insert(check_range);
5641        }
5642    }
5643    mem_info->bound_ranges[handle] = std::move(range);
5644    for (auto tmp_range : tmp_alias_ranges) {
5645        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
5646    }
5647    if (is_image)
5648        mem_info->bound_images.insert(handle);
5649    else
5650        mem_info->bound_buffers.insert(handle);
5651
5652    return skip_call;
5653}
5654
5655static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5656                                   VkMemoryRequirements mem_reqs, bool is_linear) {
5657    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
5658}
5659
5660static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
5661                                    VkMemoryRequirements mem_reqs) {
5662    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
5663}
5664
// Remove the MEMORY_RANGE struct for the given handle from bound_ranges of mem_info
//  is_image indicates if the handle is for an image or a buffer
//  This function also removes the handle from the appropriate bound_images or bound_buffers
//  set and cleans up any aliases for the range being removed
5669static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
5670    auto erase_range = &mem_info->bound_ranges[handle];
5671    for (auto alias_range : erase_range->aliases) {
5672        alias_range->aliases.erase(erase_range);
5673    }
5674    erase_range->aliases.clear();
5675    mem_info->bound_ranges.erase(handle);
5676    if (is_image) {
5677        mem_info->bound_images.erase(handle);
5678    } else {
5679        mem_info->bound_buffers.erase(handle);
5680    }
5681}
5682
5683static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
5684
5685static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
5686
5687VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
5688                                         const VkAllocationCallbacks *pAllocator) {
5689    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5690    std::unique_lock<std::mutex> lock(global_lock);
5691    if (!validateIdleBuffer(dev_data, buffer)) {
5692        // Clean up memory binding and range information for buffer
5693        auto buff_node = getBufferNode(dev_data, buffer);
5694        if (buff_node) {
5695            // Any bound cmd buffers are now invalid
5696            invalidateCommandBuffers(buff_node->cb_bindings,
5697                                     {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
5698            auto mem_info = getMemObjInfo(dev_data, buff_node->mem);
5699            if (mem_info) {
5700                RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
5701            }
5702            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
5703            dev_data->bufferMap.erase(buff_node->buffer);
5704        }
5705        lock.unlock();
5706        dev_data->device_dispatch_table->DestroyBuffer(device, buffer, pAllocator);
5707    }
5708}
5709
5710VKAPI_ATTR void VKAPI_CALL
5711DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
5712    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5713
5714    std::unique_lock<std::mutex> lock(global_lock);
5715    auto view_state = getBufferViewState(dev_data, bufferView);
5716    if (view_state) {
5717        dev_data->bufferViewMap.erase(bufferView);
5718    }
5719    lock.unlock();
5720    dev_data->device_dispatch_table->DestroyBufferView(device, bufferView, pAllocator);
5721}
5722
5723VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
5724    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5725    bool skip = false;
5726    std::unique_lock<std::mutex> lock(global_lock);
5727    auto img_node = getImageNode(dev_data, image);
5728    if (img_node) {
5729        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
5730        // Any bound cmd buffers are now invalid
5731        invalidateCommandBuffers(img_node->cb_bindings, obj_struct);
5732        skip |= ValidateObjectNotInUse(dev_data, img_node, obj_struct);
5733    }
    if (!skip) {
        // Guard the cleanup below: img_node may be null if this image handle was never tracked
        if (img_node) {
            // Clean up memory mapping, bindings and range references for image
            auto mem_info = getMemObjInfo(dev_data, img_node->mem);
            if (mem_info) {
                RemoveImageMemoryRange(reinterpret_cast<uint64_t &>(image), mem_info);
                clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
            }
            // Remove image from imageMap
            dev_data->imageMap.erase(img_node->image);
        }
5743
5744        const auto &subEntry = dev_data->imageSubresourceMap.find(image);
5745        if (subEntry != dev_data->imageSubresourceMap.end()) {
5746            for (const auto &pair : subEntry->second) {
5747                dev_data->imageLayoutMap.erase(pair);
5748            }
5749            dev_data->imageSubresourceMap.erase(subEntry);
5750        }
5751        lock.unlock();
5752        dev_data->device_dispatch_table->DestroyImage(device, image, pAllocator);
5753    }
5754}
5755
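// Verify that the memory type index the allocation was created with is one of the types the object reports it
//  can be bound to. Worked example (illustrative values): if memory_type_bits is 0b0110, only memoryTypeIndex
//  1 or 2 is compatible; an allocation made with memoryTypeIndex 0 yields (1 << 0) & 0b0110 == 0 and is flagged.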
5756static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName) {
5758    bool skip_call = false;
5759    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
5760        skip_call = log_msg(
5761            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5762            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
5763            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
5764            "type (0x%X) of this memory object 0x%" PRIx64 ".",
5765            funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
5766    }
5767    return skip_call;
5768}
5769
5770VKAPI_ATTR VkResult VKAPI_CALL
5771BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
5772    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5773    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5774    std::unique_lock<std::mutex> lock(global_lock);
5775    // Track objects tied to memory
5776    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
5777    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
5778    auto buffer_node = getBufferNode(dev_data, buffer);
5779    if (buffer_node) {
5780        VkMemoryRequirements memRequirements;
5781        dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
5782        buffer_node->mem = mem;
5783        buffer_node->memOffset = memoryOffset;
5784        buffer_node->memSize = memRequirements.size;
5785
5786        // Track and validate bound memory range information
5787        auto mem_info = getMemObjInfo(dev_data, mem);
5788        if (mem_info) {
5789            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
5790            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
5791        }
5792
5793        // Validate memory requirements alignment
5794        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
5795            skip_call |=
5796                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
5797                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
5798                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
5799                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with this buffer",
5801                        memoryOffset, memRequirements.alignment);
5802        }
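        // Worked example (illustrative values): with a required alignment of 0x100, a memoryOffset of 0x180
        //  fails the check above (0x180 % 0x100 == 0x80), while a memoryOffset of 0x200 passes.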
5803
5804        // Validate device limits alignments
5805        static const VkBufferUsageFlagBits usage_list[3] = {
5806            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
5807            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
5808            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
5809        static const char *memory_type[3] = {"texel",
5810                                             "uniform",
5811                                             "storage"};
5812        static const char *offset_name[3] = {
5813            "minTexelBufferOffsetAlignment",
5814            "minUniformBufferOffsetAlignment",
5815            "minStorageBufferOffsetAlignment"
5816        };
5817
        // NOTE: keep these entries in the same order as the usage_list/memory_type/offset_name arrays above
5819        const VkDeviceSize offset_requirement[3] = {
5820            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
5821            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
5822            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
5823        };
        VkBufferUsageFlags usage = buffer_node->createInfo.usage;
5825
5826        for (int i = 0; i < 3; i++) {
5827            if (usage & usage_list[i]) {
5828                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
5829                    skip_call |=
5830                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
5831                                0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
5832                                "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
5833                                "device limit %s 0x%" PRIxLEAST64,
5834                                memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
5835                }
5836            }
5837        }
5838    }
5839    print_mem_list(dev_data);
5840    lock.unlock();
5841    if (!skip_call) {
5842        result = dev_data->device_dispatch_table->BindBufferMemory(device, buffer, mem, memoryOffset);
5843    }
5844    return result;
5845}
5846
5847VKAPI_ATTR void VKAPI_CALL
5848GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
5849    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5850    // TODO : What to track here?
5851    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
5852    my_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
5853}
5854
5855VKAPI_ATTR void VKAPI_CALL
5856GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
5857    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5858    // TODO : What to track here?
5859    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
5860    my_data->device_dispatch_table->GetImageMemoryRequirements(device, image, pMemoryRequirements);
5861}
5862
5863VKAPI_ATTR void VKAPI_CALL
5864DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
5865    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5866    bool skip = false;
5867    std::unique_lock<std::mutex> lock(global_lock);
5868    auto view_state = getImageViewState(dev_data, imageView);
5869    if (view_state) {
5870        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(imageView), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
5871        skip |= ValidateObjectNotInUse(dev_data, view_state, obj_struct);
5872        // Any bound cmd buffers are now invalid
5873        invalidateCommandBuffers(view_state->cb_bindings, obj_struct);
5874    }
5875    if (!skip) {
5876        dev_data->imageViewMap.erase(imageView);
5877        lock.unlock();
5878        dev_data->device_dispatch_table->DestroyImageView(device, imageView, pAllocator);
5879    }
5880}
5881
5882VKAPI_ATTR void VKAPI_CALL
5883DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
5884    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5885
5886    std::unique_lock<std::mutex> lock(global_lock);
5887    my_data->shaderModuleMap.erase(shaderModule);
5888    lock.unlock();
5889
5890    my_data->device_dispatch_table->DestroyShaderModule(device, shaderModule, pAllocator);
5891}
5892
5893VKAPI_ATTR void VKAPI_CALL
5894DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
5895    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5896    bool skip = false;
5897    std::unique_lock<std::mutex> lock(global_lock);
5898    auto pipe_node = getPipeline(dev_data, pipeline);
5899    if (pipe_node) {
5900        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
5901        skip |= ValidateObjectNotInUse(dev_data, pipe_node, obj_struct);
5902        // Any bound cmd buffers are now invalid
5903        invalidateCommandBuffers(pipe_node->cb_bindings, obj_struct);
5904    }
5905    if (!skip) {
5906        dev_data->pipelineMap.erase(pipeline);
5907        lock.unlock();
5908        dev_data->device_dispatch_table->DestroyPipeline(device, pipeline, pAllocator);
5909    }
5910}
5911
5912VKAPI_ATTR void VKAPI_CALL
5913DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
5914    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5915    std::unique_lock<std::mutex> lock(global_lock);
5916    dev_data->pipelineLayoutMap.erase(pipelineLayout);
5917    lock.unlock();
5918
5919    dev_data->device_dispatch_table->DestroyPipelineLayout(device, pipelineLayout, pAllocator);
5920}
5921
5922VKAPI_ATTR void VKAPI_CALL
5923DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
5924    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5925    bool skip = false;
5926    std::unique_lock<std::mutex> lock(global_lock);
5927    auto sampler_node = getSamplerNode(dev_data, sampler);
5928    if (sampler_node) {
5929        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
5930        skip |= ValidateObjectNotInUse(dev_data, sampler_node, obj_struct);
5931        // Any bound cmd buffers are now invalid
5932        invalidateCommandBuffers(sampler_node->cb_bindings, obj_struct);
5933    }
5934    if (!skip) {
5935        dev_data->samplerMap.erase(sampler);
5936        lock.unlock();
5937        dev_data->device_dispatch_table->DestroySampler(device, sampler, pAllocator);
5938    }
5939}
5940
5941VKAPI_ATTR void VKAPI_CALL
5942DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
5943    // TODO : Clean up any internal data structures using this obj.
5944    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5945        ->device_dispatch_table->DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
5946}
5947
5948VKAPI_ATTR void VKAPI_CALL
5949DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
5950    // TODO : Clean up any internal data structures using this obj.
5951    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
5952        ->device_dispatch_table->DestroyDescriptorPool(device, descriptorPool, pAllocator);
5953}
5954// Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip_call result
5955//  If this is a secondary command buffer, then make sure its primary is also in-flight
5956//  If primary is not in-flight, then remove secondary from global in-flight set
5957// This function is only valid at a point when cmdBuffer is being reset or freed
5958static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
5959    bool skip_call = false;
5960    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
5961        // Primary CB or secondary where primary is also in-flight is an error
5962        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
5963            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
5964            skip_call |= log_msg(
5965                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
5966                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
5967                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
5968                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
5969        }
5970    }
5971    return skip_call;
5972}
5973
5974// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
5975static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
5976    bool skip_call = false;
5977    for (auto cmd_buffer : pPool->commandBuffers) {
5978        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
5979            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
5980        }
5981    }
5982    return skip_call;
5983}
5984
5985static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
5986    for (auto cmd_buffer : pPool->commandBuffers) {
5987        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
5988    }
5989}
5990
5991VKAPI_ATTR void VKAPI_CALL
5992FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
5993    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5994    bool skip_call = false;
5995    std::unique_lock<std::mutex> lock(global_lock);
5996
5997    for (uint32_t i = 0; i < commandBufferCount; i++) {
5998        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // First pass: verify that none of the command buffers being freed are still in flight
6000        if (cb_node) {
6001            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free");
6002        }
6003    }
6004
6005    if (skip_call)
6006        return;
6007
6008    auto pPool = getCommandPoolNode(dev_data, commandPool);
6009    for (uint32_t i = 0; i < commandBufferCount; i++) {
6010        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
6011        // Delete CB information structure, and remove from commandBufferMap
6012        if (cb_node) {
6013            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
6014            // reset prior to delete for data clean-up
6015            resetCB(dev_data, cb_node->commandBuffer);
6016            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
6017            delete cb_node;
6018        }
6019
        // Remove commandBuffer reference from commandPoolMap (guard against an unknown/untracked pool)
        if (pPool) {
            pPool->commandBuffers.remove(pCommandBuffers[i]);
        }
6022    }
6023    printCBList(dev_data);
6024    lock.unlock();
6025
6026    dev_data->device_dispatch_table->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
6027}
6028
6029VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
6030                                                 const VkAllocationCallbacks *pAllocator,
6031                                                 VkCommandPool *pCommandPool) {
6032    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6033
6034    VkResult result = dev_data->device_dispatch_table->CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);
6035
6036    if (VK_SUCCESS == result) {
6037        std::lock_guard<std::mutex> lock(global_lock);
6038        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
6039        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
6040    }
6041    return result;
6042}
6043
6044VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
6045                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
6046
6047    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6048    VkResult result = dev_data->device_dispatch_table->CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
6049    if (result == VK_SUCCESS) {
6050        std::lock_guard<std::mutex> lock(global_lock);
6051        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
6052        qp_node->createInfo = *pCreateInfo;
6053    }
6054    return result;
6055}
6056
6057// Destroy commandPool along with all of the commandBuffers allocated from that pool
6058VKAPI_ATTR void VKAPI_CALL
6059DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
6060    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6061    bool skip_call = false;
6062    std::unique_lock<std::mutex> lock(global_lock);
6063    // Verify that command buffers in pool are complete (not in-flight)
6064    auto pPool = getCommandPoolNode(dev_data, commandPool);
6065    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");
6066
6067    if (skip_call)
6068        return;
6069    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
6070    clearCommandBuffersInFlight(dev_data, pPool);
6071    for (auto cb : pPool->commandBuffers) {
6072        clear_cmd_buf_and_mem_references(dev_data, cb);
6073        auto cb_node = getCBNode(dev_data, cb);
6074        // Remove references to this cb_node prior to delete
6075        // TODO : Need better solution here, resetCB?
6076        for (auto obj : cb_node->object_bindings) {
6077            removeCommandBufferBinding(dev_data, &obj, cb_node);
6078        }
6079        for (auto framebuffer : cb_node->framebuffers) {
6080            auto fb_node = getFramebuffer(dev_data, framebuffer);
6081            if (fb_node)
6082                fb_node->cb_bindings.erase(cb_node);
6083        }
6084        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
6085        delete cb_node;                       // delete CB info structure
6086    }
6087    dev_data->commandPoolMap.erase(commandPool);
6088    lock.unlock();
6089
6090    dev_data->device_dispatch_table->DestroyCommandPool(device, commandPool, pAllocator);
6091}
6092
6093VKAPI_ATTR VkResult VKAPI_CALL
6094ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
6095    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6096    bool skip_call = false;
6097
6098    std::unique_lock<std::mutex> lock(global_lock);
6099    auto pPool = getCommandPoolNode(dev_data, commandPool);
6100    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
6101    lock.unlock();
6102
6103    if (skip_call)
6104        return VK_ERROR_VALIDATION_FAILED_EXT;
6105
6106    VkResult result = dev_data->device_dispatch_table->ResetCommandPool(device, commandPool, flags);
6107
6108    // Reset all of the CBs allocated from this pool
6109    if (VK_SUCCESS == result) {
6110        lock.lock();
6111        clearCommandBuffersInFlight(dev_data, pPool);
6112        for (auto cmdBuffer : pPool->commandBuffers) {
6113            resetCB(dev_data, cmdBuffer);
6114        }
6115        lock.unlock();
6116    }
6117    return result;
6118}
6119
6120VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
6121    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6122    bool skip_call = false;
6123    std::unique_lock<std::mutex> lock(global_lock);
6124    for (uint32_t i = 0; i < fenceCount; ++i) {
6125        auto pFence = getFenceNode(dev_data, pFences[i]);
6126        if (pFence && pFence->state == FENCE_INFLIGHT) {
6127            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
6128                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
6129                                 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
6130        }
6131    }
6132    lock.unlock();
6133
6134    if (skip_call)
6135        return VK_ERROR_VALIDATION_FAILED_EXT;
6136
6137    VkResult result = dev_data->device_dispatch_table->ResetFences(device, fenceCount, pFences);
6138
6139    if (result == VK_SUCCESS) {
6140        lock.lock();
6141        for (uint32_t i = 0; i < fenceCount; ++i) {
6142            auto pFence = getFenceNode(dev_data, pFences[i]);
6143            if (pFence) {
6144                pFence->state = FENCE_UNSIGNALED;
6145            }
6146        }
6147        lock.unlock();
6148    }
6149
6150    return result;
6151}
6152
6153// For given cb_nodes, invalidate them and track object causing invalidation
6154void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
6155    for (auto cb_node : cb_nodes) {
6156        cb_node->state = CB_INVALID;
6157        cb_node->broken_bindings.push_back(obj);
6158    }
6159}
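// Note: a command buffer left in CB_INVALID state will fail validation if it is submitted again without being
//  re-recorded; broken_bindings records which destroyed object(s) caused the invalidation so the resulting
//  error message can name the culprit.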
6160
6161VKAPI_ATTR void VKAPI_CALL
6162DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
6163    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6164    std::unique_lock<std::mutex> lock(global_lock);
6165    auto fb_node = getFramebuffer(dev_data, framebuffer);
6166    if (fb_node) {
6167        invalidateCommandBuffers(fb_node->cb_bindings,
6168                                 {reinterpret_cast<uint64_t &>(fb_node->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT});
6169        dev_data->frameBufferMap.erase(fb_node->framebuffer);
6170    }
6171    lock.unlock();
6172    dev_data->device_dispatch_table->DestroyFramebuffer(device, framebuffer, pAllocator);
6173}
6174
6175VKAPI_ATTR void VKAPI_CALL
6176DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
6177    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6178    bool skip = false;
6179    std::unique_lock<std::mutex> lock(global_lock);
6180    auto rp_state = getRenderPass(dev_data, renderPass);
6181    if (rp_state) {
6182        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
6183        skip |= ValidateObjectNotInUse(dev_data, rp_state, obj_struct);
6184        // Any bound cmd buffers are now invalid
6185        invalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
6186    }
6187    if (!skip) {
6188        // TODO: leaking all the guts of the renderpass node here!
6189        dev_data->renderPassMap.erase(renderPass);
6190        lock.unlock();
6191        dev_data->device_dispatch_table->DestroyRenderPass(device, renderPass, pAllocator);
6192    }
6193}
6194
6195VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6196                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
6197    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6198
6199    VkResult result = dev_data->device_dispatch_table->CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
6200
6201    if (VK_SUCCESS == result) {
6202        std::lock_guard<std::mutex> lock(global_lock);
6203        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
6204        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
6205    }
6206    return result;
6207}
6208
6209static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6210    bool skip_call = false;
6211    BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
6212    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
6213    if (buf_node) {
6214        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
6215        // In order to create a valid buffer view, the buffer must have been created with at least one of the
6216        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
6217        skip_call |= ValidateBufferUsageFlags(dev_data, buf_node,
6218                                              VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
6219                                              false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
6220    }
6221    return skip_call;
6222}
6223
6224VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6225                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
6226    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6227    std::unique_lock<std::mutex> lock(global_lock);
6228    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6229    lock.unlock();
6230    if (skip_call)
6231        return VK_ERROR_VALIDATION_FAILED_EXT;
6232    VkResult result = dev_data->device_dispatch_table->CreateBufferView(device, pCreateInfo, pAllocator, pView);
6233    if (VK_SUCCESS == result) {
6234        lock.lock();
6235        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
6236        lock.unlock();
6237    }
6238    return result;
6239}
6240
6241VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6242                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
6243    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6244
6245    VkResult result = dev_data->device_dispatch_table->CreateImage(device, pCreateInfo, pAllocator, pImage);
6246
6247    if (VK_SUCCESS == result) {
6248        std::lock_guard<std::mutex> lock(global_lock);
6249        IMAGE_LAYOUT_NODE image_node;
6250        image_node.layout = pCreateInfo->initialLayout;
6251        image_node.format = pCreateInfo->format;
6252        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(*pImage, pCreateInfo))));
6253        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
6254        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
6255        dev_data->imageLayoutMap[subpair] = image_node;
6256    }
6257    return result;
6258}
6259
6260static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
6261    /* expects global_lock to be held by caller */
6262
6263    auto image_node = getImageNode(dev_data, image);
6264    if (image_node) {
6265        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
6266         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
6267         * the actual values.
6268         */
6269        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
6270            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
6271        }
6272
6273        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
6274            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
6275        }
6276    }
6277}
6278
6279// Return the correct layer/level counts if the caller used the special
6280// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6281static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6282                                         VkImage image) {
6283    /* expects global_lock to be held by caller */
6284
6285    *levels = range.levelCount;
6286    *layers = range.layerCount;
6287    auto image_node = getImageNode(dev_data, image);
6288    if (image_node) {
6289        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
6290            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
6291        }
6292        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
6293            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
6294        }
6295    }
6296}
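// Worked example (illustrative): for an image created with mipLevels = 8, a range with baseMipLevel = 2 and
//  levelCount = VK_REMAINING_MIP_LEVELS resolves to levelCount = 6; VK_REMAINING_ARRAY_LAYERS is resolved the
//  same way from arrayLayers and baseArrayLayer.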
6297
6298static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) {
6299    bool skip_call = false;
6300    IMAGE_NODE *image_node = getImageNode(dev_data, pCreateInfo->image);
6301    if (image_node) {
6302        skip_call |= ValidateImageUsageFlags(
6303            dev_data, image_node, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6304                                      VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
6305            false, "vkCreateImageView()",
6306            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
6307        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
6308        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_node, "vkCreateImageView()");
6309    }
6310    return skip_call;
6311}
6312
6313static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView view) {
6314    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, pCreateInfo));
6315    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, pCreateInfo->image);
6316}
6317
6318VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6319                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
6320    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6321    std::unique_lock<std::mutex> lock(global_lock);
6322    bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo);
6323    lock.unlock();
6324    if (skip_call)
6325        return VK_ERROR_VALIDATION_FAILED_EXT;
6326    VkResult result = dev_data->device_dispatch_table->CreateImageView(device, pCreateInfo, pAllocator, pView);
6327    if (VK_SUCCESS == result) {
6328        lock.lock();
6329        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
6330        lock.unlock();
6331    }
6332
6333    return result;
6334}
6335
6336VKAPI_ATTR VkResult VKAPI_CALL
6337CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6338    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6339    VkResult result = dev_data->device_dispatch_table->CreateFence(device, pCreateInfo, pAllocator, pFence);
6340    if (VK_SUCCESS == result) {
6341        std::lock_guard<std::mutex> lock(global_lock);
6342        auto &fence_node = dev_data->fenceMap[*pFence];
6343        fence_node.fence = *pFence;
6344        fence_node.createInfo = *pCreateInfo;
6345        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6346    }
6347    return result;
6348}
6349
6350// TODO handle pipeline caches
6351VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6352                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6353    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6354    VkResult result = dev_data->device_dispatch_table->CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6355    return result;
6356}
6357
6358VKAPI_ATTR void VKAPI_CALL
6359DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6360    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6361    dev_data->device_dispatch_table->DestroyPipelineCache(device, pipelineCache, pAllocator);
6362}
6363
6364VKAPI_ATTR VkResult VKAPI_CALL
6365GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6366    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6367    VkResult result = dev_data->device_dispatch_table->GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6368    return result;
6369}
6370
6371VKAPI_ATTR VkResult VKAPI_CALL
6372MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6373    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6374    VkResult result = dev_data->device_dispatch_table->MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6375    return result;
6376}
6377
6378// utility function to set collective state for pipeline
6379void set_pipeline_state(PIPELINE_NODE *pPipe) {
6380    // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6381    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6382        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6383            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6384                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6385                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6386                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6387                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6388                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6389                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6390                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6391                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6392                    pPipe->blendConstantsEnabled = true;
6393                }
6394            }
6395        }
6396    }
6397}
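// Example: an attachment using srcColorBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR falls in the range tested
//  above, so blendConstantsEnabled is set; draw-time validation can then require blend constants to have been
//  set (e.g. via vkCmdSetBlendConstants) before this pipeline is used.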
6398
6399VKAPI_ATTR VkResult VKAPI_CALL
6400CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6401                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6402                        VkPipeline *pPipelines) {
6403    VkResult result = VK_SUCCESS;
6404    // TODO What to do with pipelineCache?
6405    // The order of operations here is a little convoluted but gets the job done
6406    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6407    //  2. Create state is then validated (which uses flags setup during shadowing)
6408    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6409    bool skip_call = false;
6410    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6411    vector<PIPELINE_NODE *> pPipeNode(count);
6412    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6413
6414    uint32_t i = 0;
6415    std::unique_lock<std::mutex> lock(global_lock);
6416
6417    for (i = 0; i < count; i++) {
6418        pPipeNode[i] = new PIPELINE_NODE;
6419        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
        pPipeNode[i]->render_pass_ci.initialize(getRenderPass(dev_data, pCreateInfos[i].renderPass)->pCreateInfo);
        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);

        skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
    }

    if (!skip_call) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                          pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                       VkPipeline *pPipelines) {
    VkResult result = VK_SUCCESS;
    bool skip_call = false;

    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
    vector<PIPELINE_NODE *> pPipeNode(count);
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    uint32_t i = 0;
    std::unique_lock<std::mutex> lock(global_lock);
    for (i = 0; i < count; i++) {
        // TODO: Verify compute stage bits

        // Create and initialize internal tracking data structure
        pPipeNode[i] = new PIPELINE_NODE;
        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));

        // TODO: Add Compute Pipeline Verification
        skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i], &dev_data->phys_dev_properties.features,
                                                dev_data->shaderModuleMap);
        // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
    }

    if (!skip_call) {
        lock.unlock();
        result = dev_data->device_dispatch_table->CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator,
                                                                         pPipelines);
        lock.lock();
        for (i = 0; i < count; i++) {
            pPipeNode[i]->pipeline = pPipelines[i];
            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
        }
        lock.unlock();
    } else {
        for (i = 0; i < count; i++) {
            // Clean up any locally allocated data structures
            delete pPipeNode[i];
        }
        lock.unlock();
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateSampler(device, pCreateInfo, pAllocator, pSampler);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        // TODOSC : Capture layout bindings set
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->descriptorSetLayoutMap[*pSetLayout] =
            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
    }
    return result;
}

// Used by CreatePipelineLayout and CmdPushConstants.
// Note that the index argument is optional and only used by CreatePipelineLayout.
static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
                                      const char *caller_name, uint32_t index = 0) {
    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
    bool skip_call = false;
    // Check that offset + size don't exceed the max.
    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
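    // Example: with maxPushConstantsSize = 128, offset = 0xFFFFFFF0 and size = 16, a naive
    // (offset + size > max) test would wrap to 0 and pass; checking offset first avoids that.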
    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with offset %u and size %u that "
                                                              "exceeds this device's maxPushConstantsSize of %u.",
                        caller_name, index, offset, size, maxPushConstantsSize);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
                                                                       "exceeds this device's maxPushConstantsSize of %u.",
                                 caller_name, offset, size, maxPushConstantsSize);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // size needs to be non-zero and a multiple of 4.
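    // ((size & 0x3) != 0) is the unsigned-arithmetic idiom for (size % 4 != 0).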
    if ((size == 0) || ((size & 0x3) != 0)) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        caller_name, index, size);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                              "size %u. Size must be greater than zero and a multiple of 4.",
                        caller_name, size);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    // offset needs to be a multiple of 4.
    if ((offset & 0x3) != 0) {
        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
                                                                       "offset %u. Offset must be a multiple of 4.",
                                 caller_name, index, offset);
        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
                                                                       "offset %u. Offset must be a multiple of 4.",
                                 caller_name, offset);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
        }
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Push Constant Range checks
    uint32_t i, j;
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
        }
    }
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    // Each range has been validated individually.  Now check for overlap between ranges.
    // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
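    // Treating each range as half-open [offset, offset + size), two ranges overlap exactly
    // when each one begins before the other ends, e.g. 0:[0, 8) and 1:[4, 12) overlap on [4, 8).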
    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
                                                                  "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
                            i, minA, maxA, j, minB, maxB);
            }
        }
    }

    VkResult result = dev_data->device_dispatch_table->CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
        plNode.layout = *pPipelineLayout;
        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
        }
        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                     VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        // Add this pool to the global descriptor pool map
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO : Anything to clean up if pool creation fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->device_dispatch_table->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    bool skip_call = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i)
        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");

    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                             reinterpret_cast<uint64_t &>(pool), __LINE__, DRAWSTATE_CANT_FREE_FROM_NON_FREE_POOL, "DS",
                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    return skip_call;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
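    // e.g. freeing one set whose layout declares 4 UNIFORM_BUFFER descriptors returns 4 to
    // availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] for future allocations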
    for (uint32_t i = 0; i < count; ++i) {
        auto set_state = dev_data->setMap[descriptor_sets[i]];
        uint32_t type_index = 0, descriptor_count = 0;
        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
        }
        freeDescriptorSet(dev_data, set_state);
        pool_state->sets.erase(set_state);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a Proof-of-concept for core validation architecture
//  Really we'll want to break out these functions to separate files but
//  keeping it all together here to prove out design
// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    // First thing to do is perform map look-ups.
    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    //  so we can't do a single map look-up up-front; they're done individually in the functions below.

    // Now make call(s) that validate state, but don't perform state updates in this function
    // Note that we don't yet have DescriptorSet instances here, so we use a helper function in the
    //  cvdescriptorset namespace which parses params and makes calls into specific class instances.
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL
UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // The only map look-up at the top level is for the device-level layer_data
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                         pDescriptorCopies);
    lock.unlock();
    if (!skip_call) {
        dev_data->device_dispatch_table->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                              pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->device_dispatch_table->AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        printCBList(dev_data);
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_NODE *fb_state) {
    fb_state->cb_bindings.insert(cb_state);
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
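        // Also connect the framebuffer's render pass to this command buffer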
        auto rp_state = getRenderPass(dev_data, fb_state->createInfo.renderPass);
        if (rp_state) {
            addCommandBufferBinding(
                &rp_state->cb_bindings,
                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL
BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
                        "You must check CB fence before this call.",
                        commandBuffer);
        }
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
                            reinterpret_cast<void *>(commandBuffer));
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must specify a valid renderpass parameter.",
                            reinterpret_cast<void *>(commandBuffer));
                    }
                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) may perform better if a "
                            "valid framebuffer parameter is specified.",
                            reinterpret_cast<void *>(commandBuffer));
                    } else {
                        string errorString = "";
                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
                        if (framebuffer) {
                            if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
                                !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
                                                                 getRenderPass(dev_data, pInfo->renderPass)->pCreateInfo,
                                                                 errorString)) {
                                // renderPass that framebuffer was created with must be compatible with local renderPass
                                skip_call |= log_msg(
                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                    __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                                    "vkBeginCommandBuffer(): Secondary Command "
                                    "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                    "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                                    reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
                                    reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
                                    reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
                            }
                            // Connect this framebuffer and its children to this cmdBuffer
                            AddFramebufferBinding(dev_data, cb_node, framebuffer);
                        }
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
                     dev_data->phys_dev_properties.features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                         __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
                                         "support precise occlusion queries.",
                                         reinterpret_cast<void *>(commandBuffer));
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->pCreateInfo->subpassCount) {
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%d) "
                            "that is less than the number of subpasses (%d).",
                            (void *)commandBuffer, pInfo->subpass, renderPass->pCreateInfo->subpassCount);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
                        (uint64_t)commandBuffer);
        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->cmds.back().type)) {
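            // Begin on a fully-recorded CB (or one invalidated after its End) performs an
            // implicit reset, which is only allowed when the CB's pool was created with
            // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; resetCB() below models that reset.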
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = getCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If we are a secondary command buffer and inheriting, update the items we should inherit.
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = getRenderPass(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    } else {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
    }
    lock.unlock();
    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
        }
        skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        for (auto query : pCB->activeQueries) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Ending command buffer with an in-progress query: queryPool 0x%" PRIx64 ", index %d",
                                 (uint64_t)(query.pool), query.index);
        }
    }
    if (!skip_call) {
        lock.unlock();
        result = dev_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
        lock.lock();
        // Guard on pCB: it is null if the CB wasn't tracked above, and skip_call may still be false
        if ((VK_SUCCESS == result) && pCB) {
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
            printCB(dev_data, commandBuffer);
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
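    // Note: assumes this command buffer is tracked by the layer; pCB is dereferenced
    // below without a null check.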
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = getCommandPoolNode(dev_data, cmdPool);
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
    }
    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->device_dispatch_table->ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL
CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
        }

        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
        if (pPN) {
            pCB->lastBound[pipelineBindPoint].pipeline_node = pPN;
            set_cb_pso_status(pCB, pPN);
            set_pipeline_state(pPN);
            // Only record the command buffer binding when the pipeline is known; binding
            // unconditionally would dereference a null node for an unknown pipeline handle.
            addCommandBufferBinding(&pPN->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, pCB);
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
                                 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
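        // ((1u << viewportCount) - 1u) << firstViewport builds a contiguous run of bits, e.g.
        // firstViewport = 1, viewportCount = 2 -> 0b110, marking viewports 1 and 2 as set.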
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
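        // Same mask construction as viewportMask above: one bit per scissor in
        // [firstScissor, firstScissor + scissorCount).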
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
                                 "flag. This is undefined behavior and the call may be ignored.");
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp,
                                                         depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->device_dispatch_table->CmdSetStencilReference(commandBuffer, faceMask, reference);
}

VKAPI_ATTR void VKAPI_CALL
CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                      const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
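            // e.g. firstSet = 2, setCount = 3 binds sets 2, 3 and 4, so lastSetIndex = 4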
7256            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
7257                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7258                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
7259            }
7260            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
7261            auto pipeline_layout = getPipelineLayout(dev_data, layout);
7262            for (uint32_t i = 0; i < setCount; i++) {
7263                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
7264                if (pSet) {
7265                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
7266                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
7267                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7268                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7269                                         DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
7270                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
7271                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
7272                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
7273                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7274                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
7275                                             "DS 0x%" PRIxLEAST64
7276                                             " bound but it was never updated. You may want to either update it or not bind it.",
7277                                             (uint64_t)pDescriptorSets[i]);
7278                    }
7279                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
7280                    if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
7281                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7282                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7283                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
7284                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
7285                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
7286                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
7287                    }
7288
7289                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();
7290
7291                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();
7292
7293                    if (setDynamicDescriptorCount) {
7294                        // First make sure we won't overstep bounds of pDynamicOffsets array
7295                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
7296                            skip_call |=
7297                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7298                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7299                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7300                                        "descriptorSet #%u (0x%" PRIxLEAST64
7301                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
7302                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
7303                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
7304                                        (dynamicOffsetCount - totalDynamicDescriptors));
7305                        } else { // Validate and store dynamic offsets with the set
7306                            // Validate Dynamic Offset Minimums
7307                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
7308                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
7309                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
7310                                    if (vk_safe_modulo(
7311                                            pDynamicOffsets[cur_dyn_offset],
7312                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
7313                                        skip_call |= log_msg(
7314                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7315                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7316                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
7317                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7318                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
7319                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7320                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
7321                                    }
7322                                    cur_dyn_offset++;
7323                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
7324                                    if (vk_safe_modulo(
7325                                            pDynamicOffsets[cur_dyn_offset],
7326                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
7327                                        skip_call |= log_msg(
7328                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7329                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
7330                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
7331                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
7332                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
7333                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
7334                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
7335                                    }
7336                                    cur_dyn_offset++;
7337                                }
7338                            }
7339
7340                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
7341                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
7342                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
7343                            // Keep running total of dynamic descriptor count to verify at the end
7344                            totalDynamicDescriptors += setDynamicDescriptorCount;
7345
7346                        }
7347                    }
7348                } else {
7349                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7350                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
7351                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
7352                                         (uint64_t)pDescriptorSets[i]);
7353                }
7354                skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
7355                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
7356                if (firstSet > 0) { // Check set #s below the first bound set
7357                    for (uint32_t i = 0; i < firstSet; ++i) {
7358                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
7359                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
7360                                                             pipeline_layout, i, errorString)) {
7361                            skip_call |= log_msg(
7362                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7363                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
7364                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
7365                                "Descriptor set 0x%" PRIxLEAST64
7366                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7367                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
7368                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
7369                        }
7370                    }
7371                }
7372                // Check if newly last bound set invalidates any remaining bound sets
7373                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
7374                    if (oldFinalBoundSet &&
7375                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
7376                        auto old_set = oldFinalBoundSet->GetSet();
7377                        skip_call |=
7378                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7379                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
7380                                    DRAWSTATE_NONE, "DS", "Descriptor set 0x%" PRIxLEAST64
7381                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
7382                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
7383                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
7384                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
7385                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
7386                                    lastSetIndex + 1, (uint64_t)layout);
7387                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
7388                    }
7389                }
7390            }
7391            //  dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
7392            if (totalDynamicDescriptors != dynamicOffsetCount) {
7393                skip_call |=
7394                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7395                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
7396                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
7397                            "is %u. It should exactly match the number of dynamic descriptors.",
7398                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
7399            }
7400        } else {
7401            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
7402        }
7403    }
7404    lock.unlock();
7405    if (!skip_call)
7406        dev_data->device_dispatch_table->CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7407                                                               pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
7408}
7409
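// Illustrative sketch (hypothetical application-side code, not part of this layer): the rule
// enforced above is that dynamicOffsetCount must equal the total number of dynamic descriptors
// across all sets being bound, supplied in set/binding order. Handles and offsets are invented
// for illustration.
//
//     uint32_t dynamic_offsets[2] = {0, 256};  // one offset per dynamic descriptor, in order
//     vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0 /*firstSet*/, 2 /*setCount*/, sets, 2 /*dynamicOffsetCount*/,
//                             dynamic_offsets);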
7410VKAPI_ATTR void VKAPI_CALL
7411CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
7412    bool skip_call = false;
7413    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7414    // TODO : Somewhere need to verify that IBs have correct usage state flagged
7415    std::unique_lock<std::mutex> lock(global_lock);
7416
7417    auto buff_node = getBufferNode(dev_data, buffer);
7418    auto cb_node = getCBNode(dev_data, commandBuffer);
7419    if (cb_node && buff_node) {
7420        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7421        std::function<bool()> function = [=]() {
7422            return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
7423        };
7424        cb_node->validate_functions.push_back(function);
7425        skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
7426        VkDeviceSize offset_align = 0;
7427        switch (indexType) {
7428        case VK_INDEX_TYPE_UINT16:
7429            offset_align = 2;
7430            break;
7431        case VK_INDEX_TYPE_UINT32:
7432            offset_align = 4;
7433            break;
7434        default:
7435            // ParamChecker should catch a bad enum; the alignment check below will also fire since offset_align stays 0
7436            break;
7437        }
7438        if (!offset_align || (offset % offset_align)) {
7439            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7440                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
7441                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not a multiple of the index size for %s.",
7442                                 offset, string_VkIndexType(indexType));
7443        }
7444        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
7445    } else {
7446        assert(0);
7447    }
7448    lock.unlock();
7449    if (!skip_call)
7450        dev_data->device_dispatch_table->CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
7451}
7452
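// A compilable sketch mirroring the alignment rule enforced above: an index buffer offset must
// be a multiple of the index size (2 bytes for VK_INDEX_TYPE_UINT16, 4 bytes for
// VK_INDEX_TYPE_UINT32). Assumes only the two core index types; the helper name is invented.
static inline bool ExampleIndexOffsetIsAligned(VkDeviceSize offset, VkIndexType indexType) {
    VkDeviceSize align = (indexType == VK_INDEX_TYPE_UINT16) ? 2 : 4;
    return (offset % align) == 0;  // e.g. offset 6 is valid for UINT16 but not for UINT32
}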
7453void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
7454    uint32_t end = firstBinding + bindingCount;
7455    if (pCB->currentDrawData.buffers.size() < end) {
7456        pCB->currentDrawData.buffers.resize(end);
7457    }
7458    for (uint32_t i = 0; i < bindingCount; ++i) {
7459        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
7460    }
7461}
7462
7463static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }
7464
7465VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
7466                                                uint32_t bindingCount, const VkBuffer *pBuffers,
7467                                                const VkDeviceSize *pOffsets) {
7468    bool skip_call = false;
7469    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7470    // TODO : Somewhere need to verify that VBs have correct usage state flagged
7471    std::unique_lock<std::mutex> lock(global_lock);
7472
7473    auto cb_node = getCBNode(dev_data, commandBuffer);
7474    if (cb_node) {
7475        for (uint32_t i = 0; i < bindingCount; ++i) {
7476            auto buff_node = getBufferNode(dev_data, pBuffers[i]);
7477            assert(buff_node);
7478            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7479            std::function<bool()> function = [=]() {
7480                return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
7481            };
7482            cb_node->validate_functions.push_back(function);
7483        }
7484        skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
7485        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
7486    } else {
7487        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
7488    }
7489    lock.unlock();
7490    if (!skip_call)
7491        dev_data->device_dispatch_table->CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
7492}
7493
7494/* expects global_lock to be held by caller */
7495static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
7496    bool skip_call = false;
7497
7498    for (auto imageView : pCB->updateImages) {
7499        auto view_state = getImageViewState(dev_data, imageView);
7500        if (!view_state)
7501            continue;
7502
7503        auto img_node = getImageNode(dev_data, view_state->create_info.image);
7504        assert(img_node);
7505        std::function<bool()> function = [=]() {
7506            SetImageMemoryValid(dev_data, img_node, true);
7507            return false;
7508        };
7509        pCB->validate_functions.push_back(function);
7510    }
7511    for (auto buffer : pCB->updateBuffers) {
7512        auto buff_node = getBufferNode(dev_data, buffer);
7513        assert(buff_node);
7514        std::function<bool()> function = [=]() {
7515            SetBufferMemoryValid(dev_data, buff_node, true);
7516            return false;
7517        };
7518        pCB->validate_functions.push_back(function);
7519    }
7520    return skip_call;
7521}
7522
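// The deferred-validation pattern used above, in miniature: work is captured in std::function
// objects at record time and executed later, when the command buffer is submitted. A minimal
// self-contained sketch; the function name is invented for illustration.
static inline bool ExampleRunDeferredChecks(std::vector<std::function<bool()>> &checks) {
    bool skip = false;
    for (auto &check : checks)
        skip |= check();  // each callable validates or marks memory state, returning a skip flag
    return skip;
}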
7523VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
7524                                   uint32_t firstVertex, uint32_t firstInstance) {
7525    bool skip_call = false;
7526    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7527    std::unique_lock<std::mutex> lock(global_lock);
7528    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7529    if (pCB) {
7530        skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
7531        pCB->drawCount[DRAW]++;
7532        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
7533        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7534        // TODO : Need to pass commandBuffer as srcObj here
7535        skip_call |=
7536            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7537                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
7538        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7539        if (!skip_call) {
7540            updateResourceTrackingOnDraw(pCB);
7541        }
7542        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
7543    }
7544    lock.unlock();
7545    if (!skip_call)
7546        dev_data->device_dispatch_table->CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
7547}
7548
7549VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
7550                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
7551                                          uint32_t firstInstance) {
7552    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7553    bool skip_call = false;
7554    std::unique_lock<std::mutex> lock(global_lock);
7555    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7556    if (pCB) {
7557        skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
7558        pCB->drawCount[DRAW_INDEXED]++;
7559        skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
7560        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7561        // TODO : Need to pass commandBuffer as srcObj here
7562        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7563                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7564                             "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
7565        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7566        if (!skip_call) {
7567            updateResourceTrackingOnDraw(pCB);
7568        }
7569        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
7570    }
7571    lock.unlock();
7572    if (!skip_call)
7573        dev_data->device_dispatch_table->CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset,
7574                                                        firstInstance);
7575}
7576
7577VKAPI_ATTR void VKAPI_CALL
7578CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7579    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7580    bool skip_call = false;
7581    std::unique_lock<std::mutex> lock(global_lock);
7582
7583    auto cb_node = getCBNode(dev_data, commandBuffer);
7584    auto buff_node = getBufferNode(dev_data, buffer);
7585    if (cb_node && buff_node) {
7586        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
7587        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7588        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
7589        cb_node->drawCount[DRAW_INDIRECT]++;
7590        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
7591        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7592        // TODO : Need to pass commandBuffer as srcObj here
7593        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
7594                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
7595                             "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
7596        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7597        if (!skip_call) {
7598            updateResourceTrackingOnDraw(cb_node);
7599        }
7600        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
7601    } else {
7602        assert(0);
7603    }
7604    lock.unlock();
7605    if (!skip_call)
7606        dev_data->device_dispatch_table->CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
7607}
7608
7609VKAPI_ATTR void VKAPI_CALL
7610CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
7611    bool skip_call = false;
7612    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7613    std::unique_lock<std::mutex> lock(global_lock);
7614
7615    auto cb_node = getCBNode(dev_data, commandBuffer);
7616    auto buff_node = getBufferNode(dev_data, buffer);
7617    if (cb_node && buff_node) {
7618        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
7619        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7620        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
7621        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
7622        skip_call |=
7623            validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
7624        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7625        // TODO : Need to pass commandBuffer as srcObj here
7626        skip_call |=
7627            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7628                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
7629                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
7630        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
7631        if (!skip_call) {
7632            updateResourceTrackingOnDraw(cb_node);
7633        }
7634        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
7635    } else {
7636        assert(0);
7637    }
7638    lock.unlock();
7639    if (!skip_call)
7640        dev_data->device_dispatch_table->CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
7641}
7642
7643VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
7644    bool skip_call = false;
7645    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7646    std::unique_lock<std::mutex> lock(global_lock);
7647    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7648    if (pCB) {
7649        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
7650        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
7651        skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
7652        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
7653    }
7654    lock.unlock();
7655    if (!skip_call)
7656        dev_data->device_dispatch_table->CmdDispatch(commandBuffer, x, y, z);
7657}
7658
7659VKAPI_ATTR void VKAPI_CALL
7660CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
7661    bool skip_call = false;
7662    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7663    std::unique_lock<std::mutex> lock(global_lock);
7664
7665    auto cb_node = getCBNode(dev_data, commandBuffer);
7666    auto buff_node = getBufferNode(dev_data, buffer);
7667    if (cb_node && buff_node) {
7668        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
7669        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
7670        skip_call |=
7671            validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
7672        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
7673        skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
7674        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
7675    }
7676    lock.unlock();
7677    if (!skip_call)
7678        dev_data->device_dispatch_table->CmdDispatchIndirect(commandBuffer, buffer, offset);
7679}
7680
7681VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
7682                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
7683    bool skip_call = false;
7684    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7685    std::unique_lock<std::mutex> lock(global_lock);
7686
7687    auto cb_node = getCBNode(dev_data, commandBuffer);
7688    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
7689    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
7690    if (cb_node && src_buff_node && dst_buff_node) {
7691        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7692        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
7693        // Update bindings between buffers and cmd buffer
7694        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
7695        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
7696        // Validate that SRC & DST buffers have correct usage flags set
7697        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
7698                                              "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
7699        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
7700                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
7701
7702        std::function<bool()> function = [=]() {
7703            return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
7704        };
7705        cb_node->validate_functions.push_back(function);
7706        function = [=]() {
7707            SetBufferMemoryValid(dev_data, dst_buff_node, true);
7708            return false;
7709        };
7710        cb_node->validate_functions.push_back(function);
7711
7712        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
7713        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
7714    } else {
7715        // Param_checker will flag errors on invalid objects, just assert here as debugging aid
7716        assert(0);
7717    }
7718    lock.unlock();
7719    if (!skip_call)
7720        dev_data->device_dispatch_table->CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
7721}
7722
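// Illustrative sketch (hypothetical values, not part of this layer): the usage-flag checks above
// require that buffers passed to vkCmdCopyBuffer were created with the matching transfer bits.
//
//     VkBufferCreateInfo buf_info = {};
//     buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_info.size = 4096;
//     buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
//     buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;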
7723static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
7724                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
7725    bool skip_call = false;
7726
7727    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7728        uint32_t layer = i + subLayers.baseArrayLayer;
7729        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7730        IMAGE_CMD_BUF_LAYOUT_NODE node;
7731        if (!FindLayout(cb_node, srcImage, sub, node)) {
7732            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
7733            continue;
7734        }
7735        if (node.layout != srcImageLayout) {
7736            // TODO: Improve log message in the next pass
7737            skip_call |=
7738                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7739                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image in source layout %s "
7740                                                                        "that does not match its current layout %s.",
7741                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
7742        }
7743    }
7744    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
7745        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7746            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
7747            auto image_node = getImageNode(dev_data, srcImage);
7748            if (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
7749                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7750                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7751                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7752                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
7753            }
7754        } else {
7755            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7756                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
7757                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
7758                                 string_VkImageLayout(srcImageLayout));
7759        }
7760    }
7761    return skip_call;
7762}
7763
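// Illustrative sketch (hypothetical application-side code): the performance warning above is
// avoided by transitioning the source image to TRANSFER_SRC_OPTIMAL before the copy. Handles
// and access masks are invented for illustration.
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.srcAccessMask = VK_ACCESS_MEMORY_WRITE_BIT;
//     barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = src_image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd_buf, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &barrier);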
7764static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
7765                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
7766    bool skip_call = false;
7767
7768    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
7769        uint32_t layer = i + subLayers.baseArrayLayer;
7770        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
7771        IMAGE_CMD_BUF_LAYOUT_NODE node;
7772        if (!FindLayout(cb_node, destImage, sub, node)) {
7773            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
7774            continue;
7775        }
7776        if (node.layout != destImageLayout) {
7777            skip_call |=
7778                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
7779                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image in destination layout %s "
7780                                                                        "that does not match its current layout %s.",
7781                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
7782        }
7783    }
7784    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
7785        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
7786            auto image_node = getImageNode(dev_data, destImage);
7787            if (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
7788                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
7789                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
7790                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
7791                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
7792            }
7793        } else {
7794            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7795                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
7796                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
7797                                 string_VkImageLayout(destImageLayout));
7798        }
7799    }
7800    return skip_call;
7801}
7802
7803// Test if two VkExtent3D structs are equivalent
7804static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
7805    bool result = true;
7806    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
7807        (extent->depth != other_extent->depth)) {
7808        result = false;
7809    }
7810    return result;
7811}
7812
7813// Returns the image extent of a specific subresource.
7814static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_NODE *img, const VkImageSubresourceLayers *subresource) {
7815    const uint32_t mip = subresource->mipLevel;
7816    VkExtent3D extent = img->createInfo.extent;
7817    extent.width = std::max(1U, extent.width >> mip);
7818    extent.height = std::max(1U, extent.height >> mip);
7819    extent.depth = std::max(1U, extent.depth >> mip);
7820    return extent;
7821}
7822
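// For example, a 16x16x1 image at mipLevel 2 yields (16 >> 2, 16 >> 2, max(1U, 1 >> 2)) =
// (4, 4, 1); the max() clamp keeps every dimension at least 1 for deep mip levels.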
7823// Test if the extent argument has all dimensions set to 0.
7824static inline bool IsExtentZero(const VkExtent3D *extent) {
7825    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
7826}
7827
7828// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
7829static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_NODE *img) {
7830    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
7831    VkExtent3D granularity = { 0, 0, 0 };
7832    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
7833    if (pPool) {
7834        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
7835        if (vk_format_is_compressed(img->createInfo.format)) {
7836            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
7837            granularity.width *= block_size.width;
7838            granularity.height *= block_size.height;
7839        }
7840    }
7841    return granularity;
7842}
7843
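// For example, a queue family granularity of (4, 4, 1) on a BC1 image (4x4-texel blocks) scales
// to (16, 16, 1): offsets and extents expressed in texels are then checked against the
// block-scaled granularity rather than the raw per-block values.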
7844// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
7845static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
7846    bool valid = true;
7847    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
7848        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
7849        valid = false;
7850    }
7851    return valid;
7852}
7853
7854// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
7855static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
7856                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
7857    bool skip = false;
7858    VkExtent3D offset_extent = {};
7859    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
7860    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
7861    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
7862    if (IsExtentZero(granularity)) {
7863        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
7864        if (IsExtentZero(&offset_extent) == false) {
7865            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7866                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7867                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
7868                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
7869                            function, i, member, offset->x, offset->y, offset->z);
7870        }
7871    } else {
7872        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be
7873        // integer multiples of the image transfer granularity.
7874        if (IsExtentAligned(&offset_extent, granularity) == false) {
7875            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7876                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be integer "
7877                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
7878                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
7879                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
7880                            granularity->depth);
7881        }
7882    }
7883    return skip;
7884}
7885
7886// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
7887static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
7888                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
7889                                  const uint32_t i, const char *function, const char *member) {
7890    bool skip = false;
7891    if (IsExtentZero(granularity)) {
7892        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
7893        // subresource extent.
7894        if (IsExtentEqual(extent, subresource_extent) == false) {
7895            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7896                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7897                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
7898                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
7899                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
7900                            subresource_extent->height, subresource_extent->depth);
7901        }
7902    } else {
7903        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be
7904        // integer multiples of the image transfer granularity, or the offset + extent dimensions must match the image
7905        // subresource extent dimensions.
7906        VkExtent3D offset_extent_sum = {};
7907        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
7908        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
7909        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
7910        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
7911            skip |=
7912                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7913                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7914                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be integer multiples of this command buffer's "
7915                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
7916                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
7917                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
7918                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
7919                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
7920        }
7921    }
7922    return skip;
7923}
7924
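// For example, with granularity (8, 8, 1) and a 24x24x1 subresource: extent (16, 8, 1) passes
// because every dimension is a granularity multiple, and extent (20, 24, 1) at offset (4, 0, 0)
// also passes because offset + extent equals the subresource extent (24, 24, 1) exactly.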
7925// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
7926static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
7927                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
7928    bool skip = false;
7929    if (vk_safe_modulo(value, granularity) != 0) {
7930        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7931                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7932                        "%s: pRegion[%d].%s (%d) must be an integer multiple of this command buffer's queue family image "
7933                        "transfer granularity width (%d).",
7934                        function, i, member, value, granularity);
7935    }
7936    return skip;
7937}
7938
7939// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
7940static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
7941                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
7942    bool skip = false;
7943    if (vk_safe_modulo(value, granularity) != 0) {
7944        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7945                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
7946                        "%s: pRegion[%d].%s (%" PRIuLEAST64
7947                        ") must be an integer multiple of this command buffer's queue family image transfer "
7948                        "granularity width (%d).",
7949                        function, i, member, value, granularity);
7950    }
7951    return skip;
7952}
7953
7954// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
7955static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
7956                                                                    const IMAGE_NODE *img, const VkImageCopy *region,
7957                                                                    const uint32_t i, const char *function) {
7958    bool skip = false;
7959    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
7960    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
7961    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
7962    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
7963    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
7964                           "extent");
7965    return skip;
7966}
7967
7968// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
7969static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
7970                                                                          const IMAGE_NODE *img, const VkBufferImageCopy *region,
7971                                                                          const uint32_t i, const char *function) {
7972    bool skip = false;
7973    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
7974    skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
7975    skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
7976    skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
7977    skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
7978    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
7979    skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
7980                           function, "imageExtent");
7981    return skip;
7982}
7983
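// Illustrative sketch (hypothetical values): a VkBufferImageCopy region that satisfies the
// granularity checks above for a queue family granularity of (8, 8, 1):
//
//     VkBufferImageCopy region = {};
//     region.bufferOffset = 0;        // checked against granularity.width
//     region.bufferRowLength = 64;    // checked against granularity.width
//     region.bufferImageHeight = 64;  // checked against granularity.width
//     region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//     region.imageOffset = {8, 8, 0};    // each component a granularity multiple
//     region.imageExtent = {16, 16, 1};  // aligned, or offset + extent must equal the subresource extent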
7984VKAPI_ATTR void VKAPI_CALL
7985CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
7986             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
7987    bool skip_call = false;
7988    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7989    std::unique_lock<std::mutex> lock(global_lock);
7990
7991    auto cb_node = getCBNode(dev_data, commandBuffer);
7992    auto src_img_node = getImageNode(dev_data, srcImage);
7993    auto dst_img_node = getImageNode(dev_data, dstImage);
7994    if (cb_node && src_img_node && dst_img_node) {
7995        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImage()");
7996        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyImage()");
7997        // Update bindings between images and cmd buffer
7998        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
7999        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8000        // Validate that SRC & DST images have correct usage flags set
8001        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
8002                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8003        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
8004                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8005        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImage()"); };
8006        cb_node->validate_functions.push_back(function);
8007        function = [=]() {
8008            SetImageMemoryValid(dev_data, dst_img_node, true);
8009            return false;
8010        };
8011        cb_node->validate_functions.push_back(function);
8012
8013        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
8014        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
8015        for (uint32_t i = 0; i < regionCount; ++i) {
8016            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
8017            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
8018            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
8019                                                                          "vkCmdCopyImage()");
8020        }
8021    } else {
8022        assert(0);
8023    }
8024    lock.unlock();
8025    if (!skip_call)
8026        dev_data->device_dispatch_table->CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8027                                                      regionCount, pRegions);
8028}
8029
8030// Validate that an image's sampleCount matches the requirement for a specific API call
8031static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_NODE *image_node, VkSampleCountFlagBits sample_count,
8032                                            const char *location) {
8033    bool skip = false;
8034    if (image_node->createInfo.samples != sample_count) {
8035        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
8036                       reinterpret_cast<uint64_t &>(image_node->image), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
8037                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
8038                       reinterpret_cast<uint64_t &>(image_node->image),
8039                       string_VkSampleCountFlagBits(image_node->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
8040    }
8041    return skip;
8042}
8043
8044VKAPI_ATTR void VKAPI_CALL
8045CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8046             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
8047    bool skip_call = false;
8048    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8049    std::unique_lock<std::mutex> lock(global_lock);
8050
8051    auto cb_node = getCBNode(dev_data, commandBuffer);
8052    auto src_img_node = getImageNode(dev_data, srcImage);
8053    auto dst_img_node = getImageNode(dev_data, dstImage);
8054    if (cb_node && src_img_node && dst_img_node) {
8055        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
8056        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
8057        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdBlitImage()");
8058        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdBlitImage()");
8059        // Update bindings between images and cmd buffer
8060        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8061        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8062        // Validate that SRC & DST images have correct usage flags set
8063        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
8064                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8065        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
8066                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8067        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdBlitImage()"); };
8068        cb_node->validate_functions.push_back(function);
8069        function = [=]() {
8070            SetImageMemoryValid(dev_data, dst_img_node, true);
8071            return false;
8072        };
8073        cb_node->validate_functions.push_back(function);
8074
8075        skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
8076        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
8077    } else {
8078        assert(0);
8079    }
8080    lock.unlock();
8081    if (!skip_call)
8082        dev_data->device_dispatch_table->CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8083                                                      regionCount, pRegions, filter);
8084}
8085
8086VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
8087                                                VkImage dstImage, VkImageLayout dstImageLayout,
8088                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8089    bool skip_call = false;
8090    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8091    std::unique_lock<std::mutex> lock(global_lock);
8092
8093    auto cb_node = getCBNode(dev_data, commandBuffer);
8094    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
8095    auto dst_img_node = getImageNode(dev_data, dstImage);
8096    if (cb_node && src_buff_node && dst_img_node) {
8097        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
8098        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
8099        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyBufferToImage()");
8100        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
8101        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8102        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
8103                                              "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
8104        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
8105                                             "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
8106        std::function<bool()> function = [=]() {
8107            SetImageMemoryValid(dev_data, dst_img_node, true);
8108            return false;
8109        };
8110        cb_node->validate_functions.push_back(function);
8111        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
8112        cb_node->validate_functions.push_back(function);
8113
8114        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
8115        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
8116        for (uint32_t i = 0; i < regionCount; ++i) {
8117            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
8118            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
8119                                                                                "vkCmdCopyBufferToImage()");
8120        }
8121    } else {
8122        assert(0);
8123    }
8124    lock.unlock();
8125    if (!skip_call)
8126        dev_data->device_dispatch_table->CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
8127                                                              pRegions);
8128}
8129
8130VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8131                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8132                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8133    bool skip_call = false;
8134    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8135    std::unique_lock<std::mutex> lock(global_lock);
8136
8137    auto cb_node = getCBNode(dev_data, commandBuffer);
8138    auto src_img_node = getImageNode(dev_data, srcImage);
8139    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8140    if (cb_node && src_img_node && dst_buff_node) {
8141        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage");
8142        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8143        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
8144        // Update bindings between buffer/image and cmd buffer
8145        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8146        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8147        // Validate that SRC image & DST buffer have correct usage flags set
8148        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8149                                             "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8150        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8151                                              "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8152        std::function<bool()> function = [=]() {
8153            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8154        };
8155        cb_node->validate_functions.push_back(function);
8156        function = [=]() {
8157            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8158            return false;
8159        };
8160        cb_node->validate_functions.push_back(function);
8161
8162        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8163        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
8164        for (uint32_t i = 0; i < regionCount; ++i) {
8165            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8166            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_img_node, &pRegions[i], i,
8167                                                                                "vkCmdCopyImageToBuffer()");
8168        }
8169    } else {
8170        assert(0);
8171    }
8172    lock.unlock();
8173    if (!skip_call)
8174        dev_data->device_dispatch_table->CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount,
8175                                                              pRegions);
8176}
8177
8178VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8179                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8180    bool skip_call = false;
8181    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8182    std::unique_lock<std::mutex> lock(global_lock);
8183
8184    auto cb_node = getCBNode(dev_data, commandBuffer);
8185    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8186    if (cb_node && dst_buff_node) {
8187        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
8188        // Update bindings between buffer and cmd buffer
8189        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8190        // Validate that DST buffer has correct usage flags set
8191        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8192                                              "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8193        std::function<bool()> function = [=]() {
8194            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8195            return false;
8196        };
8197        cb_node->validate_functions.push_back(function);
8198
8199        skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8200        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
8201    } else {
8202        assert(0);
8203    }
8204    lock.unlock();
8205    if (!skip_call)
8206        dev_data->device_dispatch_table->CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8207}
8208
8209VKAPI_ATTR void VKAPI_CALL
8210CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8211    bool skip_call = false;
8212    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8213    std::unique_lock<std::mutex> lock(global_lock);
8214
8215    auto cb_node = getCBNode(dev_data, commandBuffer);
8216    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8217    if (cb_node && dst_buff_node) {
8218        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
8219        // Update bindings between buffer and cmd buffer
8220        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8221        // Validate that DST buffer has correct usage flags set
8222        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
8223                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8224        std::function<bool()> function = [=]() {
8225            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8226            return false;
8227        };
8228        cb_node->validate_functions.push_back(function);
8229
8230        skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8231        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
8232    } else {
8233        assert(0);
8234    }
8235    lock.unlock();
8236    if (!skip_call)
8237        dev_data->device_dispatch_table->CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8238}
8239
8240VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8241                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
8242                                               const VkClearRect *pRects) {
8243    bool skip_call = false;
8244    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8245    std::unique_lock<std::mutex> lock(global_lock);
8246    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8247    if (pCB) {
8248        skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8249        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8250        if (!hasDrawCmd(pCB) && (rectCount > 0) &&
8251            (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8252            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8253            // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass)
8254            // Can we make this warning more specific? I'd like to avoid triggering this test if we can tell it's a use that must
8255            // call CmdClearAttachments. Otherwise this seems more like a performance warning.
8256            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8257                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
8258                                 DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8259                                 "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8260                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8261                                 (uint64_t)(commandBuffer));
8262        }
8263        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
8264    }
8265
8266    // Validate that attachment is in reference list of active subpass
8267    if (pCB && pCB->activeRenderPass) {
8268        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->pCreateInfo;
8269        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8270
8271        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8272            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8273            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8274                if (attachment->colorAttachment >= pSD->colorAttachmentCount) {
8275                    skip_call |= log_msg(
8276                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8277                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8278                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d; ignored",
8279                        attachment->colorAttachment, pCB->activeSubpass);
8280                }
8281                else if (pSD->pColorAttachments[attachment->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
8282                    skip_call |= log_msg(
8283                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8284                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8285                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored",
8286                        attachment->colorAttachment);
8287                }
8288            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8289                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
8290                    (pSD->pDepthStencilAttachment->attachment ==
8291                     VK_ATTACHMENT_UNUSED)) {
8292
8293                    skip_call |= log_msg(
8294                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8295                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8296                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
8297                }
8298            }
8299        }
8300    }
8301    lock.unlock();
8302    if (!skip_call)
8303        dev_data->device_dispatch_table->CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8304}
8305
8306VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8307                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
8308                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8309    bool skip_call = false;
8310    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8311    std::unique_lock<std::mutex> lock(global_lock);
8312    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8313
8314    auto cb_node = getCBNode(dev_data, commandBuffer);
8315    auto img_node = getImageNode(dev_data, image);
8316    if (cb_node && img_node) {
8317        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearColorImage()");
8318        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
8319        std::function<bool()> function = [=]() {
8320            SetImageMemoryValid(dev_data, img_node, true);
8321            return false;
8322        };
8323        cb_node->validate_functions.push_back(function);
8324
8325        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8326        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
8327    } else {
8328        assert(0);
8329    }
8330    lock.unlock();
8331    if (!skip_call)
8332        dev_data->device_dispatch_table->CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8333}
8334
8335VKAPI_ATTR void VKAPI_CALL
8336CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8337                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8338                          const VkImageSubresourceRange *pRanges) {
8339    bool skip_call = false;
8340    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8341    std::unique_lock<std::mutex> lock(global_lock);
8342    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8343
8344    auto cb_node = getCBNode(dev_data, commandBuffer);
8345    auto img_node = getImageNode(dev_data, image);
8346    if (cb_node && img_node) {
8347        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearDepthStencilImage()");
8348        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
8349        std::function<bool()> function = [=]() {
8350            SetImageMemoryValid(dev_data, img_node, true);
8351            return false;
8352        };
8353        cb_node->validate_functions.push_back(function);
8354
8355        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8356        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
8357    } else {
8358        assert(0);
8359    }
8360    lock.unlock();
8361    if (!skip_call)
8362        dev_data->device_dispatch_table->CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount,
8363                                                                   pRanges);
8364}
8365
8366VKAPI_ATTR void VKAPI_CALL
8367CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8368                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8369    bool skip_call = false;
8370    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8371    std::unique_lock<std::mutex> lock(global_lock);
8372
8373    auto cb_node = getCBNode(dev_data, commandBuffer);
8374    auto src_img_node = getImageNode(dev_data, srcImage);
8375    auto dst_img_node = getImageNode(dev_data, dstImage);
8376    if (cb_node && src_img_node && dst_img_node) {
8377        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdResolveImage()");
8378        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdResolveImage()");
8379        // Update bindings between images and cmd buffer
8380        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8381        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8382        std::function<bool()> function = [=]() {
8383            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdResolveImage()");
8384        };
8385        cb_node->validate_functions.push_back(function);
8386        function = [=]() {
8387            SetImageMemoryValid(dev_data, dst_img_node, true);
8388            return false;
8389        };
8390        cb_node->validate_functions.push_back(function);
8391
8392        skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8393        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
8394    } else {
8395        assert(0);
8396    }
8397    lock.unlock();
8398    if (!skip_call)
8399        dev_data->device_dispatch_table->CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout,
8400                                                         regionCount, pRegions);
8401}
8402
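// Deferred-update callback: CmdSetEvent/CmdResetEvent bind this function via std::bind and append
// it to pCB->eventUpdates rather than calling it directly. It therefore runs at queue submit time
// with the actual VkQueue, recording the event's stage mask both in the command buffer's map and
// in the submitting queue's eventToStageMap.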
8403bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8404    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8405    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8406    if (pCB) {
8407        pCB->eventToStageMap[event] = stageMask;
8408    }
8409    auto queue_data = dev_data->queueMap.find(queue);
8410    if (queue_data != dev_data->queueMap.end()) {
8411        queue_data->second.eventToStageMap[event] = stageMask;
8412    }
8413    return false;
8414}
8415
8416VKAPI_ATTR void VKAPI_CALL
8417CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8418    bool skip_call = false;
8419    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8420    std::unique_lock<std::mutex> lock(global_lock);
8421    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8422    if (pCB) {
8423        skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8424        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()");
8425        auto event_node = getEventNode(dev_data, event);
8426        if (event_node) {
8427            addCommandBufferBinding(&event_node->cb_bindings,
8428                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8429            event_node->cb_bindings.insert(pCB);
8430        }
8431        pCB->events.push_back(event);
8432        if (!pCB->waitedEvents.count(event)) {
8433            pCB->writeEventsBeforeWait.push_back(event);
8434        }
8435        std::function<bool(VkQueue)> eventUpdate =
8436            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8437        pCB->eventUpdates.push_back(eventUpdate);
8438    }
8439    lock.unlock();
8440    if (!skip_call)
8441        dev_data->device_dispatch_table->CmdSetEvent(commandBuffer, event, stageMask);
8442}
8443
8444VKAPI_ATTR void VKAPI_CALL
8445CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8446    bool skip_call = false;
8447    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8448    std::unique_lock<std::mutex> lock(global_lock);
8449    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8450    if (pCB) {
8451        skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8452        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()");
8453        auto event_node = getEventNode(dev_data, event);
8454        if (event_node) {
8455            addCommandBufferBinding(&event_node->cb_bindings,
8456                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8457            event_node->cb_bindings.insert(pCB);
8458        }
8459        pCB->events.push_back(event);
8460        if (!pCB->waitedEvents.count(event)) {
8461            pCB->writeEventsBeforeWait.push_back(event);
8462        }
8463        std::function<bool(VkQueue)> eventUpdate =
8464            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8465        pCB->eventUpdates.push_back(eventUpdate);
8466    }
8467    lock.unlock();
8468    if (!skip_call)
8469        dev_data->device_dispatch_table->CmdResetEvent(commandBuffer, event, stageMask);
8470}
8471
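// Walk the image barriers recorded by this command, resolving VK_REMAINING_MIP_LEVELS /
// VK_REMAINING_ARRAY_LAYERS to concrete counts, then record the newLayout for every affected
// subresource in the command buffer's layout map. A barrier whose oldLayout disagrees with the
// layout already tracked for a subresource is reported as DRAWSTATE_INVALID_IMAGE_LAYOUT.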
8472static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8473                                   const VkImageMemoryBarrier *pImgMemBarriers) {
8474    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8475    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8476    bool skip = false;
8477    uint32_t levelCount = 0;
8478    uint32_t layerCount = 0;
8479
8480    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8481        auto mem_barrier = &pImgMemBarriers[i];
8482        if (!mem_barrier)
8483            continue;
8484        // TODO: Do not iterate over every possibility - consolidate where
8485        // possible
8486        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8487
8488        for (uint32_t j = 0; j < levelCount; j++) {
8489            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8490            for (uint32_t k = 0; k < layerCount; k++) {
8491                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8492                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8493                IMAGE_CMD_BUF_LAYOUT_NODE node;
8494                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8495                    SetLayout(pCB, mem_barrier->image, sub,
8496                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8497                    continue;
8498                }
8499                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8500                    // TODO: Set memory invalid which is in mem_tracker currently
8501                } else if (node.layout != mem_barrier->oldLayout) {
8502                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8503                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8504                                                                                    "when current layout is %s.",
8505                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8506                }
8507                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8508            }
8509        }
8510    }
8511    return skip;
8512}
8513
8514// Print readable FlagBits in FlagMask
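// e.g. an accessMask of 0x6 yields "[VK_ACCESS_INDEX_READ_BIT | VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT]"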
8515static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8516    std::string result;
8517    std::string separator;
8518
8519    if (accessMask == 0) {
8520        result = "[None]";
8521    } else {
8522        result = "[";
8523        for (uint32_t i = 0; i < 32; i++) {
8524            if (accessMask & (1u << i)) {
8525                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1u << i));
8526                separator = " | ";
8527            }
8528        }
8529        result = result + "]";
8530    }
8531    return result;
8532}
8533
8534// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8535// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8536// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
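// Example: for VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, ValidateMaskBitsFromLayouts (below) passes
// required_bit = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT with
// optional_bits = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, so a barrier using that layout must include
// the write bit and may additionally include the read bit.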
8537static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8538                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8539                             const char *type) {
8540    bool skip_call = false;
8541
8542    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8543        if (accessMask & ~(required_bit | optional_bits)) {
8544            // TODO: Verify against Valid Use
8545            skip_call |=
8546                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8547                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8548                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8549        }
8550    } else {
8551        if (!required_bit) {
8552            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8553                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8554                                                                  "%s when layout is %s, unless the app has previously added a "
8555                                                                  "barrier for this transition.",
8556                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8557                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8558        } else {
8559            std::string opt_bits;
8560            if (optional_bits != 0) {
8561                std::stringstream ss;
8562                ss << optional_bits;
8563                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8564            }
8565            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8566                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8567                                                                  "layout is %s, unless the app has previously added a barrier for "
8568                                                                  "this transition.",
8569                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8570                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8571        }
8572    }
8573    return skip_call;
8574}
8575
8576static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8577                                        const VkImageLayout &layout, const char *type) {
8578    bool skip_call = false;
8579    switch (layout) {
8580    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8581        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8582                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8583        break;
8584    }
8585    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8586        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8587                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8588        break;
8589    }
8590    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8591        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8592        break;
8593    }
8594    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8595        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8596        break;
8597    }
8598    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8599        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8600                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8601        break;
8602    }
8603    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8604        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8605                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8606        break;
8607    }
8608    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8609        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8610        break;
8611    }
8612    case VK_IMAGE_LAYOUT_UNDEFINED: {
8613        if (accessMask != 0) {
8614            // TODO: Verify against Valid Use section spec
8615            skip_call |=
8616                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8617                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8618                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8619        }
8620        break;
8621    }
8622    case VK_IMAGE_LAYOUT_GENERAL:
8623    default: { break; }
8624    }
8625    return skip_call;
8626}
8627
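// Validate the barrier arguments of vkCmdWaitEvents/vkCmdPipelineBarrier: barriers recorded inside
// a render pass require a self-dependency on the active subpass; image barriers must follow the
// queue-family rules for their sharingMode (both indices VK_QUEUE_FAMILY_IGNORED for CONCURRENT;
// matched, or both ignored, and in range for EXCLUSIVE) and use an aspectMask/subresourceRange
// consistent with the image's format, layer count, and mip count; buffer barriers must stay within
// the buffer's size and are disallowed inside a render pass.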
8628static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8629                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8630                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8631                             const VkImageMemoryBarrier *pImageMemBarriers) {
8632    bool skip_call = false;
8633    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8634    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8635    if (pCB->activeRenderPass && memBarrierCount) {
8636        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
8637            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8638                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8639                                                                  "with no self dependency specified.",
8640                                 funcName, pCB->activeSubpass);
8641        }
8642    }
8643    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8644        auto mem_barrier = &pImageMemBarriers[i];
8645        auto image_data = getImageNode(dev_data, mem_barrier->image);
8646        if (image_data) {
8647            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8648            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8649            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8650                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8651                // be VK_QUEUE_FAMILY_IGNORED
8652                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8653                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8654                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8655                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8656                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8657                                         " queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8658                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8659                }
8660            } else {
8661                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8662                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8663                // or both be a valid queue family
8664                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8665                    (src_q_f_index != dst_q_f_index)) {
8666                    skip_call |=
8667                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8668                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8669                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8670                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8671                                                                     "must be.",
8672                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8673                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8674                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8675                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8676                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8677                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8678                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8679                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8680                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8681                                         "queueFamilies crated for this device.",
8682                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8683                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
8684                }
8685            }
8686        }
8687
8688        if (mem_barrier) {
8689            skip_call |=
8690                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8691            skip_call |=
8692                ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8693            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8694                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8695                                     __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8696                                     "%s: Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
8697                                     funcName);
8698            }
8699            auto image_data = getImageNode(dev_data, mem_barrier->image);
8700            VkFormat format = VK_FORMAT_UNDEFINED;
8701            uint32_t arrayLayers = 0, mipLevels = 0;
8702            bool imageFound = false;
8703            if (image_data) {
8704                format = image_data->createInfo.format;
8705                arrayLayers = image_data->createInfo.arrayLayers;
8706                mipLevels = image_data->createInfo.mipLevels;
8707                imageFound = true;
8708            } else if (dev_data->device_extensions.wsi_enabled) {
8709                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
8710                if (imageswap_data) {
8711                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
8712                    if (swapchain_data) {
8713                        format = swapchain_data->createInfo.imageFormat;
8714                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
8715                        mipLevels = 1;
8716                        imageFound = true;
8717                    }
8718                }
8719            }
8720            if (imageFound) {
8721                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
8722                if (vk_format_is_depth_or_stencil(format)) {
8723                    if (vk_format_is_depth_and_stencil(format)) {
8724                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) && !(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8725                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8726                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8727                                                 "%s: Image is a depth and stencil format and thus must "
8728                                                 "have either one or both of VK_IMAGE_ASPECT_DEPTH_BIT and "
8729                                                 "VK_IMAGE_ASPECT_STENCIL_BIT set.",
8730                                                 funcName);
8731                        }
8732                    } else if (vk_format_is_depth_only(format)) {
8733                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
8734                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8735                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8736                                                 "%s: Image is a depth-only format and thus must "
8737                                                 "have VK_IMAGE_ASPECT_DEPTH_BIT set.", funcName);
8738                        }
8739                    } else { // stencil-only case
8740                        if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8741                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8742                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8743                                                 "%s: Image is a stencil-only format and thus must "
8744                                                 "have VK_IMAGE_ASPECT_STENCIL_BIT set.", funcName);
8745                        }
8746                    }
8747                } else { // image is a color format
8748                    if (!(aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT)) {
8749                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
8750                                             (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8751                                             "%s: Image is a color format and thus must "
8752                                             "have VK_IMAGE_ASPECT_COLOR_BIT set.", funcName);
8753                    }
8754                }
8755                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8756                                     ? 1
8757                                     : mem_barrier->subresourceRange.layerCount;
8758                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8759                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8760                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8761                                         "%s: Subresource must have the sum of the baseArrayLayer (%d) and layerCount (%d) be "
8762                                         "less than or equal to the total number of layers (%d).",
8763                                         funcName, mem_barrier->subresourceRange.baseArrayLayer,
8764                                         mem_barrier->subresourceRange.layerCount, arrayLayers);
8765                }
8766                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8767                                     ? 1
8768                                     : mem_barrier->subresourceRange.levelCount;
8769                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8770                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
8771                                         0, __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8772                                         "%s: Subresource must have the sum of the baseMipLevel (%d) and levelCount (%d) be "
8773                                         "less than or equal to the total number of levels (%d).",
8774                                         funcName, mem_barrier->subresourceRange.baseMipLevel,
8775                                         mem_barrier->subresourceRange.levelCount, mipLevels);
8776                }
8777            }
8778        }
8779    }
8780    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8781        auto mem_barrier = &pBufferMemBarriers[i];
8782        if (pCB->activeRenderPass) {
8783            skip_call |=
8784                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8785                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8786        }
8787        if (!mem_barrier)
8788            continue;
8789
8790        // Validate buffer barrier queue family indices
8791        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8792             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8793            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8794             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8795            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8796                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8797                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8798                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8799                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8800                                 dev_data->phys_dev_properties.queue_family_properties.size());
8801        }
8802
8803        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
8804        if (buffer_node) {
8805            auto buffer_size = buffer_node->memSize;
8806            if (mem_barrier->offset >= buffer_size) {
8807                skip_call |= log_msg(
8808                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8809                    DRAWSTATE_INVALID_BARRIER, "DS",
8810                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
8811                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8812                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
8813            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8814                skip_call |= log_msg(
8815                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8816                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8817                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
8818                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8819                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8820                    reinterpret_cast<const uint64_t &>(buffer_size));
8821            }
8822        }
8823    }
8824    return skip_call;
8825}
8826
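// Submit-time check bound from CmdWaitEvents: OR together the stage masks recorded for each waited
// event (preferring the submitting queue's eventToStageMap, falling back to the event's global
// state) and require that sourceStageMask equal that OR, optionally with VK_PIPELINE_STAGE_HOST_BIT
// added for events signaled from the host via vkSetEvent.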
8827bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
8828    bool skip_call = false;
8829    VkPipelineStageFlags stageMask = 0;
8830    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8831    for (uint32_t i = 0; i < eventCount; ++i) {
8832        auto event = pCB->events[firstEventIndex + i];
8833        auto queue_data = dev_data->queueMap.find(queue);
8834        if (queue_data == dev_data->queueMap.end())
8835            return false;
8836        auto event_data = queue_data->second.eventToStageMap.find(event);
8837        if (event_data != queue_data->second.eventToStageMap.end()) {
8838            stageMask |= event_data->second;
8839        } else {
8840            auto global_event_data = getEventNode(dev_data, event);
8841            if (!global_event_data) {
8842                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8843                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8844                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8845                                     reinterpret_cast<const uint64_t &>(event));
8846            } else {
8847                stageMask |= global_event_data->stageMask;
8848            }
8849        }
8850    }
8851    // TODO: Need to validate that host_bit is only set if set event is called
8852    // but set event can be called at any time.
8853    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8854        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8855                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to vkCmdWaitEvents "
8856                                                            "using srcStageMask 0x%X which must be the bitwise "
8857                                                            "OR of the stageMask parameters used in calls to "
8858                                                            "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
8859                                                            "used with vkSetEvent but instead is 0x%X.",
8860                             sourceStageMask, stageMask);
8861    }
8862    return skip_call;
8863}
8864
8865VKAPI_ATTR void VKAPI_CALL
8866CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8867              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8868              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8869              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8870    bool skip_call = false;
8871    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8872    std::unique_lock<std::mutex> lock(global_lock);
8873    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8874    if (pCB) {
8875        auto firstEventIndex = pCB->events.size();
8876        for (uint32_t i = 0; i < eventCount; ++i) {
8877            auto event_node = getEventNode(dev_data, pEvents[i]);
8878            if (event_node) {
8879                addCommandBufferBinding(&event_node->cb_bindings,
8880                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
8881                                        pCB);
8882                event_node->cb_bindings.insert(pCB);
8883            }
8884            pCB->waitedEvents.insert(pEvents[i]);
8885            pCB->events.push_back(pEvents[i]);
8886        }
8887        std::function<bool(VkQueue)> eventUpdate =
8888            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
8889        pCB->eventUpdates.push_back(eventUpdate);
8890        if (pCB->state == CB_RECORDING) {
8891            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
8892        } else {
8893            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
8894        }
8895        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8896        skip_call |=
8897            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8898                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8899    }
8900    lock.unlock();
8901    if (!skip_call)
8902        dev_data->device_dispatch_table->CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
8903                                                       memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8904                                                       pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8905}
8906
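// CmdPipelineBarrier shares its barrier checks with CmdWaitEvents above: it records the implied
// image layout transitions via TransitionImageLayouts, then runs ValidateBarriers over all three
// barrier arrays before dispatching to the driver.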
8907VKAPI_ATTR void VKAPI_CALL
8908CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
8909                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8910                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8911                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8912    bool skip_call = false;
8913    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8914    std::unique_lock<std::mutex> lock(global_lock);
8915    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8916    if (pCB) {
8917        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
8918        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
8919        skip_call |=
8920            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8921                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8922    }
8923    lock.unlock();
8924    if (!skip_call)
8925        dev_data->device_dispatch_table->CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
8926                                                            memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
8927                                                            pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
8928}
8929
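// Deferred-update callback, analogous to setEventStageMask above: bound at record time and
// executed at queue submit to mark a query's availability in both the command buffer's and the
// submitting queue's queryToStateMap.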
8930bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
8931    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8932    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8933    if (pCB) {
8934        pCB->queryToStateMap[object] = value;
8935    }
8936    auto queue_data = dev_data->queueMap.find(queue);
8937    if (queue_data != dev_data->queueMap.end()) {
8938        queue_data->second.queryToStateMap[object] = value;
8939    }
8940    return false;
8941}
8942
8943VKAPI_ATTR void VKAPI_CALL
8944CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
8945    bool skip_call = false;
8946    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8947    std::unique_lock<std::mutex> lock(global_lock);
8948    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8949    if (pCB) {
8950        QueryObject query = {queryPool, slot};
8951        pCB->activeQueries.insert(query);
8952        if (!pCB->startedQueries.count(query)) {
8953            pCB->startedQueries.insert(query);
8954        }
8955        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
8956        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8957                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8958    }
8959    lock.unlock();
8960    if (!skip_call)
8961        dev_data->device_dispatch_table->CmdBeginQuery(commandBuffer, queryPool, slot, flags);
8962}
8963
8964VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
8965    bool skip_call = false;
8966    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8967    std::unique_lock<std::mutex> lock(global_lock);
8968    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8969    if (pCB) {
8970        QueryObject query = {queryPool, slot};
8971        if (!pCB->activeQueries.count(query)) {
8972            skip_call |=
8973                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8974                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
8975                        (uint64_t)(queryPool), slot);
8976        } else {
8977            pCB->activeQueries.erase(query);
8978        }
8979        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
8980        pCB->queryUpdates.push_back(queryUpdate);
8981        if (pCB->state == CB_RECORDING) {
8982            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
8983        } else {
8984            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
8985        }
8986        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
8987                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
8988    }
8989    lock.unlock();
8990    if (!skip_call)
8991        dev_data->device_dispatch_table->CmdEndQuery(commandBuffer, queryPool, slot);
8992}
8993
8994VKAPI_ATTR void VKAPI_CALL
8995CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
8996    bool skip_call = false;
8997    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8998    std::unique_lock<std::mutex> lock(global_lock);
8999    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9000    if (pCB) {
9001        for (uint32_t i = 0; i < queryCount; i++) {
9002            QueryObject query = {queryPool, firstQuery + i};
9003            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9004            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9005            pCB->queryUpdates.push_back(queryUpdate);
9006        }
9007        if (pCB->state == CB_RECORDING) {
9008            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9009        } else {
9010            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9011        }
9012        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
9013        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9014                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9015    }
9016    lock.unlock();
9017    if (!skip_call)
9018        dev_data->device_dispatch_table->CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9019}
9020
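// Submit-time check bound from CmdCopyQueryPoolResults: each copied query slot must have been made
// available (queue-local state is consulted first, then the device-global queryToStateMap);
// otherwise the copy would read an invalid query and DRAWSTATE_INVALID_QUERY is reported.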
9021bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9022    bool skip_call = false;
9023    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9024    auto queue_data = dev_data->queueMap.find(queue);
9025    if (queue_data == dev_data->queueMap.end())
9026        return false;
9027    for (uint32_t i = 0; i < queryCount; i++) {
9028        QueryObject query = {queryPool, firstQuery + i};
9029        auto query_data = queue_data->second.queryToStateMap.find(query);
9030        bool fail = false;
9031        if (query_data != queue_data->second.queryToStateMap.end()) {
9032            if (!query_data->second) {
9033                fail = true;
9034            }
9035        } else {
9036            auto global_query_data = dev_data->queryToStateMap.find(query);
9037            if (global_query_data != dev_data->queryToStateMap.end()) {
9038                if (!global_query_data->second) {
9039                    fail = true;
9040                }
9041            } else {
9042                fail = true;
9043            }
9044        }
9045        if (fail) {
9046            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9047                                 DRAWSTATE_INVALID_QUERY, "DS",
9048                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9049                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9050        }
9051    }
9052    return skip_call;
9053}
9054
9055VKAPI_ATTR void VKAPI_CALL
9056CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
9057                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
9058    bool skip_call = false;
9059    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9060    std::unique_lock<std::mutex> lock(global_lock);
9061
9062    auto cb_node = getCBNode(dev_data, commandBuffer);
9063    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
9064    if (cb_node && dst_buff_node) {
9065        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
9066        // Update bindings between buffer and cmd buffer
9067        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
9068        // Validate that DST buffer has correct usage flags set
9069        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
9070                                              "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9071        std::function<bool()> function = [=]() {
9072            SetBufferMemoryValid(dev_data, dst_buff_node, true);
9073            return false;
9074        };
9075        cb_node->validate_functions.push_back(function);
9076        std::function<bool(VkQueue)> queryUpdate =
9077            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9078        cb_node->queryUpdates.push_back(queryUpdate);
9079        if (cb_node->state == CB_RECORDING) {
9080            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9081        } else {
9082            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9083        }
9084        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
9085        auto pool_node = getQueryPoolNode(dev_data, queryPool);
        if (pool_node) {  // an unknown queryPool handle has no tracking node; don't dereference null
9086            addCommandBufferBinding(&pool_node->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
        }
9087    } else {
9088        assert(0);
9089    }
9090    lock.unlock();
9091    if (!skip_call)
9092        dev_data->device_dispatch_table->CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer,
9093                                                                 dstOffset, stride, flags);
9094}
9095
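// The stageFlags check in CmdPushConstants first coalesces the pipeline layout's flag-matching
// push constant ranges into disjoint spans. Illustrative (hypothetical) example: ranges
// {offset 0, size 16} and {offset 8, size 24} with identical stageFlags merge into the single
// span [0, 32), so an update at offset 4 with size 20 is accepted even though neither range
// contains it on its own.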
9096VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
9097                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9098                                            const void *pValues) {
9099    bool skip_call = false;
9100    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9101    std::unique_lock<std::mutex> lock(global_lock);
9102    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9103    if (pCB) {
9104        if (pCB->state == CB_RECORDING) {
9105            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9106        } else {
9107            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9108        }
9109    }
9110    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9111    if (0 == stageFlags) {
9112        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9113                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
9114    }
9115
9116    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9117    auto pipeline_layout = getPipelineLayout(dev_data, layout);
9118    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9119    // contained in the pipeline ranges.
9120    // Build a {start, end} span list for ranges with matching stage flags.
9121    const auto &ranges = pipeline_layout->push_constant_ranges;
9122    struct span {
9123        uint32_t start;
9124        uint32_t end;
9125    };
9126    std::vector<span> spans;
9127    spans.reserve(ranges.size());
9128    for (const auto &iter : ranges) {
9129        if (iter.stageFlags == stageFlags) {
9130            spans.push_back({iter.offset, iter.offset + iter.size});
9131        }
9132    }
9133    if (spans.empty()) {
9134        // There were no ranges that matched the stageFlags.
9135        skip_call |=
9136            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9137                    DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9138                                                          "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
9139                    (uint32_t)stageFlags, (uint64_t)layout);
9140    } else {
9141        // Sort span list by start value.
9142        struct comparer {
9143            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9144        } my_comparer;
9145        std::sort(spans.begin(), spans.end(), my_comparer);
9146
9147        // Examine two spans at a time.
9148        std::vector<span>::iterator current = spans.begin();
9149        std::vector<span>::iterator next = current + 1;
9150        while (next != spans.end()) {
9151            if (current->end < next->start) {
9152                // There is a gap; cannot coalesce. Move to the next two spans.
9153                ++current;
9154                ++next;
9155            } else {
9156                // Coalesce the two spans.  The start of the next span
9157                // is within the current span, so pick the larger of
9158                // the end values to extend the current span.
9159                // Then delete the next span and set next to the span after it.
9160                current->end = max(current->end, next->end);
9161                next = spans.erase(next);
9162            }
9163        }
9164
9165        // Now we can check if the incoming range is within any of the spans.
9166        bool contained_in_a_range = false;
9167        for (uint32_t i = 0; i < spans.size(); ++i) {
9168            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9169                contained_in_a_range = true;
9170                break;
9171            }
9172        }
9173        if (!contained_in_a_range) {
9174            skip_call |=
9175                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9176                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
9177                                                              "with stageFlags = 0x%" PRIx32 " "
9178                                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
9179                        offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
9180        }
9181    }
9182    lock.unlock();
9183    if (!skip_call)
9184        dev_data->device_dispatch_table->CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9185}
9186
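// Like the query-pool copy above, timestamp results resolve at execution time: CmdWriteTimestamp
// records a setQueryState closure on pCB->queryUpdates that marks the query available once the
// submitting queue runs the command buffer.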
9187VKAPI_ATTR void VKAPI_CALL
9188CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9189    bool skip_call = false;
9190    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9191    std::unique_lock<std::mutex> lock(global_lock);
9192    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9193    if (pCB) {
9194        QueryObject query = {queryPool, slot};
9195        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9196        pCB->queryUpdates.push_back(queryUpdate);
9197        if (pCB->state == CB_RECORDING) {
9198            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9199        } else {
9200            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9201        }
9202    }
9203    lock.unlock();
9204    if (!skip_call)
9205        dev_data->device_dispatch_table->CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9206}
9207
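// MatchUsage maps each attachment reference through the framebuffer's image views back to the
// underlying image and confirms that image was created with the usage bit the render pass
// requires, e.g. an image serving as a color attachment must have been created with
// VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.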
9208static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9209                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
9210    bool skip_call = false;
9211
9212    for (uint32_t attach = 0; attach < count; attach++) {
9213        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9214            // Attachment counts are verified elsewhere, but prevent an invalid access
9215            if (attachments[attach].attachment < fbci->attachmentCount) {
9216                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9217                auto view_state = getImageViewState(dev_data, *image_view);
9218                if (view_state) {
9219                    auto image_node = getImageNode(dev_data, view_state->create_info.image);
9220                    if (image_node) {  // guard before dereferencing; the old post-dereference null-check could never fire
9221                        if ((image_node->createInfo.usage & usage_flag) == 0) {
9222                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9223                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
9224                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9225                                                 "IMAGE_USAGE flags (%s).",
9226                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
9227                        }
9228                    }
9229                }
9230            }
9231        }
9232    }
9233    return skip_call;
9234}
9235
9236// Validate VkFramebufferCreateInfo which includes:
9237// 1. attachmentCount equals renderPass attachmentCount
9238// 2. corresponding framebuffer and renderpass attachments have matching formats
9239// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9240// 4. fb attachments only have a single mip level
9241// 5. fb attachment dimensions are each at least as large as the fb
9242// 6. fb attachments use identity swizzle
9243// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9244// 8. fb dimensions are within physical device limits
9245static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9246    bool skip_call = false;
9247
9248    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
9249    if (rp_node) {
9250        const VkRenderPassCreateInfo *rpci = rp_node->pCreateInfo;
9251        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9252            skip_call |= log_msg(
9253                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9254                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9255                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9256                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
9257                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9258        } else {
9259            // attachmentCounts match, so make sure corresponding attachment details line up
9260            const VkImageView *image_views = pCreateInfo->pAttachments;
9261            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9262                auto view_state = getImageViewState(dev_data, image_views[i]);
                if (!view_state) {
                    continue;  // no view state to validate against; avoid a null dereference
                }
9263                auto &ivci = view_state->create_info;
9264                if (ivci.format != rpci->pAttachments[i].format) {
9265                    skip_call |= log_msg(
9266                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9267                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9268                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9269                              "the format of "
9270                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9271                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9272                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9273                }
                auto image_node = getImageNode(dev_data, ivci.image);
                if (!image_node) {
                    continue;  // unknown image; nothing to compare sample counts or extents against
                }
9274                const VkImageCreateInfo *ici = &image_node->createInfo;
9275                if (ici->samples != rpci->pAttachments[i].samples) {
9276                    skip_call |= log_msg(
9277                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9278                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9279                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9280                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9281                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9282                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9283                }
9284                // Verify that view only has a single mip level
9285                if (ivci.subresourceRange.levelCount != 1) {
9286                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9287                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9288                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
9289                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
9290                                         i, ivci.subresourceRange.levelCount);
9291                }
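                // The mip dimensions computed below follow the standard mip-chain rule: each
                // level halves width and height (rounding down) with a floor of 1. For an
                // illustrative 100x40 image, mip level 2 is 25x10 and mip level 6 clamps to 1x1.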
9292                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9293                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9294                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9295                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9296                    (mip_height < pCreateInfo->height)) {
9297                    skip_call |=
9298                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9299                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9300                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9301                                "than the corresponding "
9302                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9303                                "dimensions for "
9304                                "attachment #%u, framebuffer:\n"
9305                                "width: %u, %u\n"
9306                                "height: %u, %u\n"
9307                                "layerCount: %u, %u\n",
9308                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9309                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9310                }
9311                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9312                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9313                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9314                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9315                    skip_call |= log_msg(
9316                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9317                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9318                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
9319                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9320                        "r swizzle = %s\n"
9321                        "g swizzle = %s\n"
9322                        "b swizzle = %s\n"
9323                        "a swizzle = %s\n",
9324                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9325                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
9326                }
9327            }
9328        }
9329        // Verify correct attachment usage flags
9330        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9331            // Verify input attachments:
9332            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
9333                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
9334            // Verify color attachments:
9335            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
9336                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
9337            // Verify depth/stencil attachments:
9338            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9339                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9340                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
9341            }
9342        }
9343    } else {
9344        skip_call |=
9345            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9346                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9347                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
9348                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9349    }
9350    // Verify FB dimensions are within physical device limits
9351    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
9352        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
9353        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
9354        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9355                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9356                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
9357                             "Here are the respective dimensions: requested, device max:\n"
9358                             "width: %u, %u\n"
9359                             "height: %u, %u\n"
9360                             "layerCount: %u, %u\n",
9361                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9362                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9363                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
9364    }
9365    return skip_call;
9366}
9367
9368// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
9369//  Return true if an error is encountered and the callback returns true, indicating the call down the chain should be skipped;
9370//   false indicates that the call down the chain should proceed
9371static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9372    // TODO : Verify that the renderPass this FB is being created with is compatible with the FB
9373    bool skip_call = false;
9374    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9375    return skip_call;
9376}
9377
9378// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9379static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9380    // Shadow create info and store in map
9381    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
9382        new FRAMEBUFFER_NODE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->pCreateInfo));
9383
9384    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9385        VkImageView view = pCreateInfo->pAttachments[i];
9386        auto view_state = getImageViewState(dev_data, view);
9387        if (!view_state) {
9388            continue;
9389        }
9390        MT_FB_ATTACHMENT_INFO fb_info;
        auto image_node = getImageNode(dev_data, view_state->create_info.image);
9391        fb_info.mem = image_node ? image_node->mem : MEMORY_UNBOUND;  // fall back to the unbound sentinel if no image state exists
9392        fb_info.view_state = view_state;
9393        fb_info.image = view_state->create_info.image;
9394        fb_node->attachments.push_back(fb_info);
9395    }
9396    dev_data->frameBufferMap[fb] = std::move(fb_node);
9397}
9398
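// CreateFramebuffer follows the layer's usual pattern: validate under the global lock, release
// the lock around the call down the chain, then re-acquire it to record state only on VK_SUCCESS.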
9399VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9400                                                 const VkAllocationCallbacks *pAllocator,
9401                                                 VkFramebuffer *pFramebuffer) {
9402    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9403    std::unique_lock<std::mutex> lock(global_lock);
9404    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9405    lock.unlock();
9406
9407    if (skip_call)
9408        return VK_ERROR_VALIDATION_FAILED_EXT;
9409
9410    VkResult result = dev_data->device_dispatch_table->CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9411
9412    if (VK_SUCCESS == result) {
9413        lock.lock();
9414        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9415        lock.unlock();
9416    }
9417    return result;
9418}
9419
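// FindDependency walks depth-first backwards along each node's prev edges, using processed_nodes
// as a visited set so shared ancestors are expanded only once; it returns true iff a recorded
// dependency path leads from subpass `index` back to subpass `dependent`.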
9420static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9421                           std::unordered_set<uint32_t> &processed_nodes) {
9422    // If we have already checked this node, no dependency path was found through it, so return false.
9423    if (processed_nodes.count(index))
9424        return false;
9425    processed_nodes.insert(index);
9426    const DAGNode &node = subpass_to_node[index];
9427    // Look for a direct dependency; if none exists, recurse over the previous nodes.
9428    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9429        for (auto elem : node.prev) {
9430            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9431                return true;
9432        }
9433    } else {
9434        return true;
9435    }
9436    return false;
9437}
9438
9439static bool CheckDependencyExists(const layer_data *my_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9440                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9441    bool result = true;
9442    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9443    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9444        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
9445            continue;
9446        const DAGNode &node = subpass_to_node[subpass];
9447        // Check for a specified dependency between the two nodes. If one exists we are done.
9448        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9449        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9450        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9451            // If no explicit dependency exists, an implicit one still might. If neither does, log an error.
9452            std::unordered_set<uint32_t> processed_nodes;
9453            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9454                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9455                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9456                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9457                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9458                                     dependent_subpasses[k]);
9459                result = false;
9460            }
9461        }
9462    }
9463    return result;
9464}
9465
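// CheckPreserved recurses from a reading subpass back through the DAG. depth == 0 is the reader
// itself; any intermediate subpass (depth > 0) that sits between a writer and the reader without
// writing the attachment itself must list it in pPreserveAttachments, or an error is logged.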
9466static bool CheckPreserved(const layer_data *my_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9467                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9468    const DAGNode &node = subpass_to_node[index];
9469    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9470    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9471    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9472        if (attachment == subpass.pColorAttachments[j].attachment)
9473            return true;
9474    }
9475    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9476        if (attachment == subpass.pDepthStencilAttachment->attachment)
9477            return true;
9478    }
9479    bool result = false;
9480    // Loop through previous nodes and see if any of them write to the attachment.
9481    for (auto elem : node.prev) {
9482        result |= CheckPreserved(my_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9483    }
9484    // If the attachment was written to by a previous node, then this node needs to preserve it.
9485    if (result && depth > 0) {
9487        bool has_preserved = false;
9488        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9489            if (subpass.pPreserveAttachments[j] == attachment) {
9490                has_preserved = true;
9491                break;
9492            }
9493        }
9494        if (!has_preserved) {
9495            skip_call |=
9496                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9497                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9498                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9499        }
9500    }
9501    return result;
9502}
9503
9504template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9505    // Half-open ranges [offset, offset + size) overlap iff each one starts before the other ends.
    // The original disjunction missed containment and identical ranges, so those aliases went undetected.
9506    return ((offset1 + size1) > offset2) && (offset1 < (offset2 + size2));
9507}
9508
9509bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9510    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9511            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9512}
9513
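// ValidateDependencies detects aliasing between framebuffer attachments in three tiers: the same
// VkImageView handle, different views of one image with overlapping subresource ranges, and
// different images bound to overlapping byte ranges of the same VkDeviceMemory. Aliased
// attachments then inherit each other's subpass usage lists for the dependency checks below.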
9514static bool ValidateDependencies(const layer_data *my_data, FRAMEBUFFER_NODE const * framebuffer,
9515                                 RENDER_PASS_NODE const * renderPass) {
9516    bool skip_call = false;
9517    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
9518    const VkRenderPassCreateInfo *pCreateInfo = renderPass->pCreateInfo;
9519    auto const & subpass_to_node = renderPass->subpassToNode;
9520    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9521    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9522    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9523    // Find overlapping attachments
9524    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9525        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9526            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9527            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9528            if (viewi == viewj) {
9529                overlapping_attachments[i].push_back(j);
9530                overlapping_attachments[j].push_back(i);
9531                continue;
9532            }
9533            auto view_state_i = getImageViewState(my_data, viewi);
9534            auto view_state_j = getImageViewState(my_data, viewj);
9535            if (!view_state_i || !view_state_j) {
9536                continue;
9537            }
9538            auto view_ci_i = view_state_i->create_info;
9539            auto view_ci_j = view_state_j->create_info;
9540            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9541                overlapping_attachments[i].push_back(j);
9542                overlapping_attachments[j].push_back(i);
9543                continue;
9544            }
9545            auto image_data_i = getImageNode(my_data, view_ci_i.image);
9546            auto image_data_j = getImageNode(my_data, view_ci_j.image);
9547            if (!image_data_i || !image_data_j) {
9548                continue;
9549            }
9550            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
9551                                                                             image_data_j->memOffset, image_data_j->memSize)) {
9552                overlapping_attachments[i].push_back(j);
9553                overlapping_attachments[j].push_back(i);
9554            }
9555        }
9556    }
9557    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9558        uint32_t attachment = i;
9559        for (auto other_attachment : overlapping_attachments[i]) {
9560            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9561                skip_call |=
9562                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9563                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9564                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9565                            attachment, other_attachment);
9566            }
9567            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9568                skip_call |=
9569                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9570                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9571                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9572                            other_attachment, attachment);
9573            }
9574        }
9575    }
9576    // Find for each attachment the subpasses that use them.
9577    // For each attachment, find the subpasses that use it.
9578    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9579        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9580        attachmentIndices.clear();
9581        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9582            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9583            if (attachment == VK_ATTACHMENT_UNUSED)
9584                continue;
9585            input_attachment_to_subpass[attachment].push_back(i);
9586            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9587                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9588            }
9589        }
9590        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9591            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9592            if (attachment == VK_ATTACHMENT_UNUSED)
9593                continue;
9594            output_attachment_to_subpass[attachment].push_back(i);
9595            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9596                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9597            }
9598            attachmentIndices.insert(attachment);
9599        }
9600        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9601            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9602            output_attachment_to_subpass[attachment].push_back(i);
9603            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9604                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9605            }
9606
9607            if (attachmentIndices.count(attachment)) {
9608                skip_call |=
9609                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
9610                            0, __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9611                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).",
9612                            attachment, i);
9613            }
9614        }
9615    }
9616    // If a dependency is needed, make sure one exists
9617    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9618        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9619        // If the attachment is an input, then all subpasses that write to it must have a dependency relationship
9620        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9621            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9622            if (attachment == VK_ATTACHMENT_UNUSED)
9623                continue;
9624            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9625        }
9626        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9627        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9628            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9629            if (attachment == VK_ATTACHMENT_UNUSED)
9630                continue;
9631            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9632            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9633        }
9634        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9635            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9636            CheckDependencyExists(my_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9637            CheckDependencyExists(my_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9638        }
9639    }
9640    // Walk the implicit dependencies: if a subpass reads an attachment, make sure every subpass between
9641    // the write and the read preserves it.
9642    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9643        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9644        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9645            CheckPreserved(my_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9646        }
9647    }
9648    return skip_call;
9649}
9650// ValidateLayoutVsAttachmentDescription validates state associated with the VkAttachmentDescription
9651// structs used by the subpasses of a renderpass. The initial check ensures that attachments whose first
9652// layout is READ_ONLY do not use CLEAR as their loadOp.
9653static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
9654                                                  const uint32_t attachment,
9655                                                  const VkAttachmentDescription &attachment_description) {
9656    bool skip_call = false;
9657    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
9658    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9659        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
9660            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
9661            skip_call |=
9662                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9663                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9664                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
9665        }
9666    }
9667    return skip_call;
9668}
9669
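// ValidateLayouts applies a three-way policy to every attachment reference: the layout matching
// its usage (e.g. COLOR_ATTACHMENT_OPTIMAL for a color attachment) passes silently, GENERAL
// produces a performance warning, and any other layout is an error. The first use of each
// attachment is additionally checked against its VkAttachmentDescription via the helper above.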
9670static bool ValidateLayouts(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9671    bool skip = false;
9672
9673    // Track when we're observing the first use of an attachment
9674    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
9675    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9676        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9677        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9678            auto attach_index = subpass.pColorAttachments[j].attachment;
9679            if (attach_index == VK_ATTACHMENT_UNUSED)
9680                continue;
9681
9682            switch (subpass.pColorAttachments[j].layout) {
9683            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
9684                /* This is ideal. */
9685                break;
9686
9687            case VK_IMAGE_LAYOUT_GENERAL:
9688                /* May not be optimal; TODO: reconsider this warning based on
9689                 * other constraints?
9690                 */
9691                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9692                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9693                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9694                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9695                break;
9696
9697            default:
9698                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9699                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9700                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9701                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9702                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
9703            }
9704
9705            if (attach_first_use[attach_index]) {
9706                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pColorAttachments[j].layout,
9707                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9708            }
9709            attach_first_use[attach_index] = false;
9710        }
9711        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9712            switch (subpass.pDepthStencilAttachment->layout) {
9713            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
9714            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9715                /* These are ideal. */
9716                break;
9717
9718            case VK_IMAGE_LAYOUT_GENERAL:
9719                /* May not be optimal; TODO: reconsider this warning based on
9720                 * other constraints? GENERAL can be better than doing a bunch
9721                 * of transitions.
9722                 */
9723                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9724                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9725                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9726                                "GENERAL layout for depth attachment may not give optimal performance.");
9727                break;
9728
9729            default:
9730                /* No other layouts are acceptable */
9731                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9732                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9733                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9734                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
9735                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
9736                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9737            }
9738
9739            auto attach_index = subpass.pDepthStencilAttachment->attachment;
9740            if (attach_first_use[attach_index]) {
9741                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pDepthStencilAttachment->layout,
9742                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9743            }
9744            attach_first_use[attach_index] = false;
9745        }
9746        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9747            auto attach_index = subpass.pInputAttachments[j].attachment;
9748            if (attach_index == VK_ATTACHMENT_UNUSED)
9749                continue;
9750
9751            switch (subpass.pInputAttachments[j].layout) {
9752            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9753            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
9754                /* These are ideal. */
9755                break;
9756
9757            case VK_IMAGE_LAYOUT_GENERAL:
9758                /* May not be optimal. TODO: reconsider this warning based on
9759                 * other constraints.
9760                 */
9761                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9762                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
9763                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9764                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9765                break;
9766
9767            default:
9768                /* No other layouts are acceptable */
9769                skip |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9770                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9771                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9772                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
9773            }
9774
9775            if (attach_first_use[attach_index]) {
9776                skip |= ValidateLayoutVsAttachmentDescription(my_data->report_data, subpass.pInputAttachments[j].layout,
9777                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9778            }
9779            attach_first_use[attach_index] = false;
9780        }
9781    }
9782    return skip;
9783}
9784
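// CreatePassDAG turns the VkSubpassDependency list into a DAG over subpass indices: each
// dependency adds a prev edge on its dstSubpass and a next edge on its srcSubpass (recorded only
// on the non-VK_SUBPASS_EXTERNAL side), and src == dst additionally sets that subpass's
// has_self_dependency flag. Backwards and fully-external dependencies are rejected first.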
9785static bool CreatePassDAG(const layer_data *my_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9786                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9787    bool skip_call = false;
9788    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9789        DAGNode &subpass_node = subpass_to_node[i];
9790        subpass_node.pass = i;
9791    }
9792    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9793        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9794        if (dependency.srcSubpass > dependency.dstSubpass && dependency.srcSubpass != VK_SUBPASS_EXTERNAL &&
9795            dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9796            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9797                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9798                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9799        } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL && dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9800            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9801                                 DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9802        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9803            has_self_dependency[dependency.srcSubpass] = true;
9804        }
9805        if (dependency.dstSubpass != VK_SUBPASS_EXTERNAL) {
9806            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9807        }
9808        if (dependency.srcSubpass != VK_SUBPASS_EXTERNAL) {
9809            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9810        }
9811    }
9812    return skip_call;
9813}
9814
9815
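// CreateShaderModule runs the SPIRV-Tools validator over the module before anything else touches
// it. Note the word count: VkShaderModuleCreateInfo::codeSize is in bytes, so the
// spv_const_binary_t below divides by sizeof(uint32_t) to get the SPIR-V word count.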
9816VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9817                                                  const VkAllocationCallbacks *pAllocator,
9818                                                  VkShaderModule *pShaderModule) {
9819    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9820    bool skip_call = false;
9821
9822    /* Use SPIRV-Tools validator to try and catch any issues with the module itself */
9823    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
9824    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
9825    spv_diagnostic diag = nullptr;
9826
9827    auto result = spvValidate(ctx, &binary, &diag);
9828    if (result != SPV_SUCCESS) {
9829        skip_call |= log_msg(my_data->report_data,
9830                             result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
9831                             VkDebugReportObjectTypeEXT(0), 0,
9832                             __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC", "SPIR-V module not valid: %s",
9833                             diag && diag->error ? diag->error : "(no error text)");
9834    }
9835
9836    spvDiagnosticDestroy(diag);
9837    spvContextDestroy(ctx);
9838
9839    if (skip_call)
9840        return VK_ERROR_VALIDATION_FAILED_EXT;
9841
9842    VkResult res = my_data->device_dispatch_table->CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9843
9844    if (res == VK_SUCCESS) {
9845        std::lock_guard<std::mutex> lock(global_lock);
9846        my_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9847    }
9848    return res;
9849}
9850
9851static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9852    bool skip_call = false;
9853    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9854        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9855                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9856                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
9857                             type, attachment, attachment_count);
9858    }
9859    return skip_call;
9860}
9861
9862static bool IsPowerOfTwo(unsigned x) {
9863    return x && !(x & (x-1));
9864}
9865
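// The sample-count consistency check below relies on VkSampleCountFlagBits values being single
// bits: ORing every attachment's samples into sample_count yields a power of two exactly when
// all attachments in a subpass agree. E.g. VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_4_BIT stays
// 0x4, while VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT gives 0x5 and is reported.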
9866static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9867    bool skip_call = false;
9868    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9869        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9870        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9871            skip_call |=
9872                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9873                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9874                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9875        }
9876        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9877            uint32_t attachment = subpass.pPreserveAttachments[j];
9878            if (attachment == VK_ATTACHMENT_UNUSED) {
9879                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9880                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9881                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
9882            } else {
9883                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
9884            }
9885        }
9886
9887        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
9888            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
9889            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
9890
9891        unsigned sample_count = 0;
9892
9893        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9894            uint32_t attachment;
9895            if (subpass.pResolveAttachments) {
9896                attachment = subpass.pResolveAttachments[j].attachment;
9897                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
9898
9899                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
9900                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
9901                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9902                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9903                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
9904                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s.",
9905                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
9906                }
9907            }
9908            attachment = subpass.pColorAttachments[j].attachment;
9909            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
9910
9911            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9912                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9913
9914                if (subpass_performs_resolve &&
9915                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
9916                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9917                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9918                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
9919                                         "which has VK_SAMPLE_COUNT_1_BIT.",
9920                                         i, attachment);
9921                }
9922            }
9923        }
9924
9925        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9926            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9927            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
9928
9929            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
9930                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
9931            }
9932        }
9933
9934        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9935            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9936            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
9937        }
9938
9939        if (sample_count && !IsPowerOfTwo(sample_count)) {
9940            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9941                                 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9942                                 "CreateRenderPass:  Subpass %u attempts to render to "
9943                                 "attachments with inconsistent sample counts.",
9944                                 i);
9945        }
9946    }
9947    return skip_call;
9948}
9949
9950VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9951                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
9952    bool skip_call = false;
9953    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9954
9955    std::unique_lock<std::mutex> lock(global_lock);
9956
9957    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
9958    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
9959    //       ValidateLayouts.
9960    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
9961    lock.unlock();
9962
9963    if (skip_call) {
9964        return VK_ERROR_VALIDATION_FAILED_EXT;
9965    }
9966
9967    VkResult result = dev_data->device_dispatch_table->CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
9968
9969    if (VK_SUCCESS == result) {
9970        lock.lock();
9971
9972        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
9973        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
9974        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
9975
9976        // Shadow create info and store in map
9977        VkRenderPassCreateInfo *localRPCI = new VkRenderPassCreateInfo(*pCreateInfo);
9978        if (pCreateInfo->pAttachments) {
9979            localRPCI->pAttachments = new VkAttachmentDescription[localRPCI->attachmentCount];
9980            memcpy((void *)localRPCI->pAttachments, pCreateInfo->pAttachments,
9981                   localRPCI->attachmentCount * sizeof(VkAttachmentDescription));
9982        }
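        // Each subpass's input/color/resolve/depth/preserve references are packed below into one
        // contiguous VkAttachmentReference allocation per subpass; the `attachments` cursor is
        // advanced past each section as the corresponding p*Attachments pointer is rebound.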
9983        if (pCreateInfo->pSubpasses) {
9984            localRPCI->pSubpasses = new VkSubpassDescription[localRPCI->subpassCount];
9985            memcpy((void *)localRPCI->pSubpasses, pCreateInfo->pSubpasses, localRPCI->subpassCount * sizeof(VkSubpassDescription));
9986
9987            for (uint32_t i = 0; i < localRPCI->subpassCount; i++) {
9988                VkSubpassDescription *subpass = (VkSubpassDescription *)&localRPCI->pSubpasses[i];
9989                const uint32_t attachmentCount = subpass->inputAttachmentCount +
9990                                                 subpass->colorAttachmentCount * (1 + (subpass->pResolveAttachments ? 1 : 0)) +
9991                                                 ((subpass->pDepthStencilAttachment) ? 1 : 0) + subpass->preserveAttachmentCount;
9992                VkAttachmentReference *attachments = new VkAttachmentReference[attachmentCount];
9993
9994                memcpy(attachments, subpass->pInputAttachments, sizeof(attachments[0]) * subpass->inputAttachmentCount);
9995                subpass->pInputAttachments = attachments;
9996                attachments += subpass->inputAttachmentCount;
9997
9998                memcpy(attachments, subpass->pColorAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
9999                subpass->pColorAttachments = attachments;
10000                attachments += subpass->colorAttachmentCount;
10001
10002                if (subpass->pResolveAttachments) {
10003                    memcpy(attachments, subpass->pResolveAttachments, sizeof(attachments[0]) * subpass->colorAttachmentCount);
10004                    subpass->pResolveAttachments = attachments;
10005                    attachments += subpass->colorAttachmentCount;
10006                }
10007
10008                if (subpass->pDepthStencilAttachment) {
10009                    memcpy(attachments, subpass->pDepthStencilAttachment, sizeof(attachments[0]) * 1);
10010                    subpass->pDepthStencilAttachment = attachments;
10011                    attachments += 1;
10012                }
10013
                // pPreserveAttachments is an array of uint32_t indices, not VkAttachmentReference;
                // copy only preserveAttachmentCount * sizeof(uint32_t) bytes so we do not read
                // past the end of the application's array.
10014                memcpy(attachments, subpass->pPreserveAttachments, sizeof(uint32_t) * subpass->preserveAttachmentCount);
10015                subpass->pPreserveAttachments = &attachments->attachment;
10016            }
10017        }
10018        if (pCreateInfo->pDependencies) {
10019            localRPCI->pDependencies = new VkSubpassDependency[localRPCI->dependencyCount];
10020            memcpy((void *)localRPCI->pDependencies, pCreateInfo->pDependencies,
10021                   localRPCI->dependencyCount * sizeof(VkSubpassDependency));
10022        }
10023
10024        auto render_pass = new RENDER_PASS_NODE(localRPCI);
10025        render_pass->renderPass = *pRenderPass;
10026        render_pass->hasSelfDependency = has_self_dependency;
10027        render_pass->subpassToNode = subpass_to_node;
10028#if MTMERGESOURCE
10029        // MTMTODO : Merge with code from above to eliminate duplication
10030        for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
10031            VkAttachmentDescription desc = pCreateInfo->pAttachments[i];
10032            MT_PASS_ATTACHMENT_INFO pass_info;
10033            pass_info.load_op = desc.loadOp;
10034            pass_info.store_op = desc.storeOp;
10035            pass_info.stencil_load_op = desc.stencilLoadOp;
10036            pass_info.stencil_store_op = desc.stencilStoreOp;
10037            pass_info.attachment = i;
10038            render_pass->attachments.push_back(pass_info);
10039        }
10040        // TODO: Maybe fill list and then copy instead of locking
10041        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
10042        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
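        // Record, per attachment, whether its first use in the render pass is a read (input
        // attachment) or a write (color or depth/stencil), along with the layout at that first use.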
10043        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10044            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10045            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10046                uint32_t attachment = subpass.pColorAttachments[j].attachment;
10047                if (!attachment_first_read.count(attachment)) {
10048                    attachment_first_read.insert(std::make_pair(attachment, false));
10049                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
10050                }
10051            }
10052            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10053                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10054                if (!attachment_first_read.count(attachment)) {
10055                    attachment_first_read.insert(std::make_pair(attachment, false));
10056                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
10057                }
10058            }
10059            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10060                uint32_t attachment = subpass.pInputAttachments[j].attachment;
10061                if (!attachment_first_read.count(attachment)) {
10062                    attachment_first_read.insert(std::make_pair(attachment, true));
10063                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
10064                }
10065            }
10066        }
10067#endif
10068        dev_data->renderPassMap[*pRenderPass] = render_pass;
10069    }
10070    return result;
10071}
10072
10073// Free the renderpass shadow
10074static void deleteRenderPasses(layer_data *my_data) {
10075    for (auto renderPass : my_data->renderPassMap) {
10076        const VkRenderPassCreateInfo *pRenderPassInfo = renderPass.second->pCreateInfo;
10077        delete[] pRenderPassInfo->pAttachments;
10078        if (pRenderPassInfo->pSubpasses) {
10079            for (uint32_t i = 0; i < pRenderPassInfo->subpassCount; ++i) {
10080                // Attachments are all allocated in a single block, so we only need to
10081                //  find the first non-null pointer and delete that block
10082                if (pRenderPassInfo->pSubpasses[i].pInputAttachments) {
10083                    delete[] pRenderPassInfo->pSubpasses[i].pInputAttachments;
10084                } else if (pRenderPassInfo->pSubpasses[i].pColorAttachments) {
10085                    delete[] pRenderPassInfo->pSubpasses[i].pColorAttachments;
10086                } else if (pRenderPassInfo->pSubpasses[i].pResolveAttachments) {
10087                    delete[] pRenderPassInfo->pSubpasses[i].pResolveAttachments;
10088                } else if (pRenderPassInfo->pSubpasses[i].pPreserveAttachments) {
10089                    delete[] pRenderPassInfo->pSubpasses[i].pPreserveAttachments;
10090                }
10091            }
10092            delete[] pRenderPassInfo->pSubpasses;
10093        }
10094        delete[] pRenderPassInfo->pDependencies;
10095        delete pRenderPassInfo;
10096        delete renderPass.second;
10097    }
10098    my_data->renderPassMap.clear();
10099}
10100
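// Verify that, for each framebuffer attachment, the render pass's initialLayout either matches the
// layout currently tracked for that image subresource in this command buffer or is
// VK_IMAGE_LAYOUT_UNDEFINED; subresources with no tracked layout are seeded with the initial layout.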
10101static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10102    bool skip_call = false;
10103    const VkRenderPassCreateInfo *pRenderPassInfo = dev_data->renderPassMap[pRenderPassBegin->renderPass]->pCreateInfo;
10104    const safe_VkFramebufferCreateInfo framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
10105    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
10106        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10107                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
10108                                                                 "with a different number of attachments.");
10109    }
10110    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10111        const VkImageView &image_view = framebufferInfo.pAttachments[i];
10112        auto view_state = getImageViewState(dev_data, image_view);
10113        assert(view_state);
10114        const VkImage &image = view_state->create_info.image;
10115        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
10116        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
10117                                             pRenderPassInfo->pAttachments[i].initialLayout};
10118        // TODO: Do not iterate over every possibility - consolidate where possible
10119        for (uint32_t j = 0; j < subRange.levelCount; j++) {
10120            uint32_t level = subRange.baseMipLevel + j;
10121            for (uint32_t k = 0; k < subRange.layerCount; k++) {
10122                uint32_t layer = subRange.baseArrayLayer + k;
10123                VkImageSubresource sub = {subRange.aspectMask, level, layer};
10124                IMAGE_CMD_BUF_LAYOUT_NODE node;
10125                if (!FindLayout(pCB, image, sub, node)) {
10126                    SetLayout(pCB, image, sub, newNode);
10127                    continue;
10128                }
10129                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
10130                    newNode.layout != node.layout) {
10131                    skip_call |=
10132                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10133                                DRAWSTATE_INVALID_RENDERPASS, "DS",
10134                                "You cannot start a render pass using attachment %u "
10135                                "where the render pass initial layout is %s and the previous "
10136                                "known layout of the attachment is %s. The layouts must match, or "
10137                                "the render pass initial layout for the attachment must be "
10138                                "VK_IMAGE_LAYOUT_UNDEFINED",
10139                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
10140                }
10141            }
10142        }
10143    }
10144    return skip_call;
10145}
10146
10147static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_NODE *pFramebuffer,
10148                                          VkAttachmentReference ref) {
10151    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
10152        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
10153        SetLayout(dev_data, pCB, image_view, ref.layout);
10154    }
10155}
10156
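// Record the layout transitions a subpass performs implicitly: each input, color, and
// depth/stencil attachment reference moves its image view to the layout named in the reference.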
10157static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
10158                                     const int subpass_index) {
10159    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10160    if (!renderPass)
10161        return;
10162
10163    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10164    if (!framebuffer)
10165        return;
10166
10167    const VkSubpassDescription &subpass = renderPass->pCreateInfo->pSubpasses[subpass_index];
10168    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10169        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
10170    }
10171    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10172        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
10173    }
10174    if (subpass.pDepthStencilAttachment) {
10175        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
10176    }
10177}
10178
10179static bool validatePrimaryCommandBuffer(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
10180    bool skip_call = false;
10181    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
10182        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10183                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
10184                             cmd_name.c_str());
10185    }
10186    return skip_call;
10187}
10188
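// At vkCmdEndRenderPass() every attachment is implicitly transitioned to its finalLayout, so
// record those layouts in the command buffer's tracking state.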
10189static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10190    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10191    if (!renderPass)
10192        return;
10193
10194    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->pCreateInfo;
10195    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10196    if (!framebuffer)
10197        return;
10198
10199    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10200        auto image_view = framebuffer->createInfo.pAttachments[i];
10201        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
10202    }
10203}
10204
10205static bool VerifyRenderAreaBounds(const layer_data *my_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10206    bool skip_call = false;
10207    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &getFramebuffer(my_data, pRenderPassBegin->framebuffer)->createInfo;
10208    if (pRenderPassBegin->renderArea.offset.x < 0 ||
10209        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10210        pRenderPassBegin->renderArea.offset.y < 0 ||
10211        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10212        skip_call |= static_cast<bool>(log_msg(
10213            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10214            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
10215            "Cannot execute a render pass with renderArea not within the bound of the "
10216            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10217            "height %d.",
10218            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10219            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10220    }
10221    return skip_call;
10222}
10223
10224// Returns true when the given op applies to this format: stencil-only formats consider only the
10225// stencil[Load|Store]Op, depth/color formats only the [load|store]Op, combined depth/stencil either.
10226// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
10227template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10228    if (color_depth_op != op && stencil_op != op) {
10229        return false;
10230    }
10231    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10232    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10233
10234    return ((check_color_depth_load_op && (color_depth_op == op)) ||
10235            (check_stencil_load_op && (stencil_op == op)));
10236}
10237
10238VKAPI_ATTR void VKAPI_CALL
10239CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
10240    bool skip_call = false;
10241    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10242    std::unique_lock<std::mutex> lock(global_lock);
10243    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10244    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
10245    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10246    if (cb_node) {
10247        if (renderPass) {
10248            uint32_t clear_op_size = 0; // One past the highest attachment index that uses VK_ATTACHMENT_LOAD_OP_CLEAR
10249            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
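            // Queue a deferred validation callback per attachment, keyed to its effective load op:
            // CLEAR marks the image contents valid, DONT_CARE marks them invalid, and LOAD (or a
            // first-use read) requires the contents to already be valid at submit time.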
10250            for (size_t i = 0; i < renderPass->attachments.size(); ++i) {
10251                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10252                VkFormat format = renderPass->pCreateInfo->pAttachments[renderPass->attachments[i].attachment].format;
10253                if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
10254                                                         renderPass->attachments[i].stencil_load_op,
10255                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10256                    clear_op_size = static_cast<uint32_t>(i) + 1;
10257                    std::function<bool()> function = [=]() {
10258                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10259                        return false;
10260                    };
10261                    cb_node->validate_functions.push_back(function);
10262                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
10263                                                                renderPass->attachments[i].stencil_load_op,
10264                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10265                    std::function<bool()> function = [=]() {
10266                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10267                        return false;
10268                    };
10269                    cb_node->validate_functions.push_back(function);
10270                } else if (FormatSpecificLoadAndStoreOpSettings(format, renderPass->attachments[i].load_op,
10271                                                                renderPass->attachments[i].stencil_load_op,
10272                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
10273                    std::function<bool()> function = [=]() {
10274                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10275                                                          "vkCmdBeginRenderPass()");
10276                    };
10277                    cb_node->validate_functions.push_back(function);
10278                }
10279                if (renderPass->attachment_first_read[renderPass->attachments[i].attachment]) {
10280                    std::function<bool()> function = [=]() {
10281                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10282                                                          "vkCmdBeginRenderPass()");
10283                    };
10284                    cb_node->validate_functions.push_back(function);
10285                }
10286            }
10287            if (clear_op_size > pRenderPassBegin->clearValueCount) {
10288                skip_call |=
10289                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10290                            reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10291                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
10292                            "be at least %u "
10293                            "entries in pClearValues array to account for the highest index attachment in renderPass 0x%" PRIx64
10294                            " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array "
10295                            "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to "
10296                            "attachments that aren't cleared they will be ignored.",
10297                            pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass),
10298                            clear_op_size, clear_op_size - 1);
10299            }
10300            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10301            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
10302            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass");
10303            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
10304            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass");
10305            skip_call |= addCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10306            cb_node->activeRenderPass = renderPass;
10307            // This is a shallow copy as that is all that is needed for now
10308            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10309            cb_node->activeSubpass = 0;
10310            cb_node->activeSubpassContents = contents;
10311            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10312            // Connect this framebuffer and its children to this cmdBuffer
10313            AddFramebufferBinding(dev_data, cb_node, framebuffer);
10314            // transition attachments to the correct layouts for the first subpass
10315            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
10316        } else {
10317            skip_call |=
10318                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10319                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
10320        }
10321    }
10322    lock.unlock();
10323    if (!skip_call) {
10324        dev_data->device_dispatch_table->CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10325    }
10326}
10327
10328VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10329    bool skip_call = false;
10330    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10331    std::unique_lock<std::mutex> lock(global_lock);
10332    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10333    if (pCB) {
10334        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
10335        skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10336        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
10337
        // outsideRenderPass() above only logs an error; guard against a null activeRenderPass so
        // the final-subpass check below cannot dereference it.
10338        if (pCB->activeRenderPass && (pCB->activeSubpass == pCB->activeRenderPass->pCreateInfo->subpassCount - 1)) {
10340            skip_call |=
10341                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10342                        reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10343                        "vkCmdNextSubpass(): Attempted to advance beyond final subpass");
10344        }
10345    }
10346    lock.unlock();
10347
10348    if (skip_call)
10349        return;
10350
10351    dev_data->device_dispatch_table->CmdNextSubpass(commandBuffer, contents);
10352
10353    if (pCB) {
10354        lock.lock();
10355        pCB->activeSubpass++;
10356        pCB->activeSubpassContents = contents;
10357        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
10358    }
10359}
10360
10361VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
10362    bool skip_call = false;
10363    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10364    std::unique_lock<std::mutex> lock(global_lock);
10365    auto pCB = getCBNode(dev_data, commandBuffer);
10366    if (pCB) {
10367        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
10368        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
10369        if (pRPNode) {
10370            if (pCB->activeSubpass != pRPNode->pCreateInfo->subpassCount - 1) {
10371                skip_call |=
10372                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10373                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10374                            "vkCmdEndRenderPass(): Called before reaching final subpass");
10375            }
10376
10377            for (size_t i = 0; i < pRPNode->attachments.size(); ++i) {
10378                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10379                VkFormat format = pRPNode->pCreateInfo->pAttachments[pRPNode->attachments[i].attachment].format;
10380                if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
10381                                                         pRPNode->attachments[i].stencil_store_op, VK_ATTACHMENT_STORE_OP_STORE)) {
10382                    std::function<bool()> function = [=]() {
10383                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10384                        return false;
10385                    };
10386                    pCB->validate_functions.push_back(function);
10387                } else if (FormatSpecificLoadAndStoreOpSettings(format, pRPNode->attachments[i].store_op,
10388                                                                pRPNode->attachments[i].stencil_store_op,
10389                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
10390                    std::function<bool()> function = [=]() {
10391                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10392                        return false;
10393                    };
10394                    pCB->validate_functions.push_back(function);
10395                }
10396            }
10397        }
10398        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
10399        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
10400        skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
10401    }
10402    lock.unlock();
10403
10404    if (skip_call)
10405        return;
10406
10407    dev_data->device_dispatch_table->CmdEndRenderPass(commandBuffer);
10408
10409    if (pCB) {
10410        lock.lock();
10411        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
10412        pCB->activeRenderPass = nullptr;
10413        pCB->activeSubpass = 0;
10414        pCB->activeFramebuffer = VK_NULL_HANDLE;
10415    }
10416}
10417
10418static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
10419                                        uint32_t secondaryAttach, const char *msg) {
10420    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10421                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10422                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
10423                   "that is not compatible with the Primary Cmd Buffer current render pass. "
10424                   "Attachment %u is not compatible with %u: %s",
10425                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
10426}
10427
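// Two attachment references are compatible when both are VK_ATTACHMENT_UNUSED, or when the
// referenced attachments share the same format and sample count (and, for multi-subpass render
// passes, the same flags); indices beyond attachmentCount are treated as unused.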
10428static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10429                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
10430                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
10431                                            uint32_t secondaryAttach, bool is_multi) {
10432    bool skip_call = false;
10433    if (primaryPassCI->attachmentCount <= primaryAttach) {
10434        primaryAttach = VK_ATTACHMENT_UNUSED;
10435    }
10436    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
10437        secondaryAttach = VK_ATTACHMENT_UNUSED;
10438    }
10439    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
10440        return skip_call;
10441    }
10442    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
10443        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10444                                                 "The first is unused while the second is not.");
10445        return skip_call;
10446    }
10447    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
10448        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10449                                                 "The second is unused while the first is not.");
10450        return skip_call;
10451    }
10452    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
10453        skip_call |=
10454            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
10455    }
10456    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
10457        skip_call |=
10458            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
10459    }
10460    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
10461        skip_call |=
10462            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
10463    }
10464    return skip_call;
10465}
10466
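// Compare the given subpass of two render passes attachment-by-attachment across the input,
// color, resolve, and depth/stencil references, padding the shorter array with
// VK_ATTACHMENT_UNUSED so a count mismatch is reported as an incompatible reference.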
10467static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10468                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10469                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
10470    bool skip_call = false;
10471    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
10472    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
10473    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
10474    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
10475        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
10476        if (i < primary_desc.inputAttachmentCount) {
10477            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
10478        }
10479        if (i < secondary_desc.inputAttachmentCount) {
10480            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
10481        }
10482        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
10483                                                     secondaryPassCI, secondary_input_attach, is_multi);
10484    }
10485    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
10486    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
10487        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
10488        if (i < primary_desc.colorAttachmentCount) {
10489            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
10490        }
10491        if (i < secondary_desc.colorAttachmentCount) {
10492            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
10493        }
10494        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
10495                                                     secondaryPassCI, secondary_color_attach, is_multi);
10496        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
10497        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
10498            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
10499        }
10500        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
10501            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
10502        }
10503        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
10504                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
10505    }
10506    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
10507    if (primary_desc.pDepthStencilAttachment) {
10508        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
10509    }
10510    if (secondary_desc.pDepthStencilAttachment) {
10511        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
10512    }
10513    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
10514                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
10515    return skip_call;
10516}
10517
10518// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
10519//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
10520//  will then feed into this function
10521static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10522                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10523                                            VkRenderPassCreateInfo const *secondaryPassCI) {
10524    bool skip_call = false;
10525
10526    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
10527        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10528                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10529                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10530                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
10531                             " that has a subpassCount of %u.",
10532                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
10533                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
10534    } else {
10535        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
10536            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
10537                                                      primaryPassCI->subpassCount > 1);
10538        }
10539    }
10540    return skip_call;
10541}
10542
10543static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10544                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10545    bool skip_call = false;
10546    if (!pSubCB->beginInfo.pInheritanceInfo) {
10547        return skip_call;
10548    }
10549    VkFramebuffer primary_fb = pCB->activeFramebuffer;
10550    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10551    if (secondary_fb != VK_NULL_HANDLE) {
10552        if (primary_fb != secondary_fb) {
10553            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10554                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10555                                 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10556                                 " which has a framebuffer 0x%" PRIx64
10557                                 " that is not the same as the primaryCB's current active framebuffer 0x%" PRIx64 ".",
10558                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10559                                 reinterpret_cast<uint64_t &>(primary_fb));
10560        }
10561        auto fb = getFramebuffer(dev_data, secondary_fb);
10562        if (!fb) {
10563            skip_call |=
10564                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10565                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10566                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
10567                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10568            return skip_call;
10569        }
10570        auto cb_renderpass = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10571        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
10572            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
10573                                                         cb_renderpass->pCreateInfo);
10574        }
10575    }
10576    return skip_call;
10577}
10578
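// Check query and queue-family constraints on executing a secondary command buffer: the pipeline
// statistics it inherits must all be enabled on any active pipeline-statistics query pool, it must
// not have started a query of a type that is already active in the primary, and both command
// buffers must come from pools created for the same queue family.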
10579static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10580    bool skip_call = false;
10581    unordered_set<int> activeTypes;
10582    for (auto queryObject : pCB->activeQueries) {
10583        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10584        if (queryPoolData != dev_data->queryPoolMap.end()) {
10585            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10586                pSubCB->beginInfo.pInheritanceInfo) {
10587                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10588                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10589                    skip_call |= log_msg(
10590                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10591                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10592                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10593                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics is being queried so the command "
10594                        "buffer must have all bits set on the queryPool.",
10595                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10596                }
10597            }
10598            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10599        }
10600    }
10601    for (auto queryObject : pSubCB->startedQueries) {
10602        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10603        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10604            skip_call |=
10605                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10606                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10607                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10608                        "which has invalid active query pool 0x%" PRIx64 "of type %d but a query of that type has been started on "
10609                        "secondary Cmd Buffer 0x%p.",
10610                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10611                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10612        }
10613    }
10614
10615    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10616    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10617    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10618        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10619                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10620                             "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
10621                             " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
10622                             reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
10623                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
10624    }
10625
10626    return skip_call;
10627}
10628
10629VKAPI_ATTR void VKAPI_CALL
10630CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10631    bool skip_call = false;
10632    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10633    std::unique_lock<std::mutex> lock(global_lock);
10634    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10635    if (pCB) {
10636        GLOBAL_CB_NODE *pSubCB = NULL;
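        // Validate each element of pCommandBuffers: it must be a known secondary command buffer
        // and, when executed inside a render pass, it must have been begun with
        // VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT and a compatible render pass/framebuffer.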
10637        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10638            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10639            if (!pSubCB) {
10640                skip_call |=
10641                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10642                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10643                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
10644                            (void *)pCommandBuffers[i], i);
10645            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10646                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10647                                     __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10648                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10649                                     "array. All cmd buffers in pCommandBuffers array must be secondary.",
10650                                     (void *)pCommandBuffers[i], i);
10651            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10652                auto secondary_rp_node = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10653                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10654                    skip_call |= log_msg(
10655                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10656                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10657                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10658                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10659                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
10660                } else {
10661                    // Make sure render pass is compatible with parent command buffer pass if has continue
10662                    if (secondary_rp_node && (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass)) {
10663                        skip_call |= validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->pCreateInfo,
10664                                                                    pCommandBuffers[i], secondary_rp_node->pCreateInfo);
10665                    }
10666                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10667                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10668                }
10669                string errorString = "";
10670                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
10671                if (secondary_rp_node && (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) &&
10672                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->pCreateInfo, secondary_rp_node->pCreateInfo,
10673                                                     errorString)) {
10674                    skip_call |= log_msg(
10675                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10676                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10677                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
10678                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
10679                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10680                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
10681                }
10682            }
10683            // TODO(mlentine): Move more logic into this method
10684            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10685            skip_call |= validateCommandBufferState(dev_data, pSubCB);
10686            // Secondary cmdBuffers are considered pending execution starting w/
10687            // being recorded
10688            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10689                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10690                    skip_call |= log_msg(
10691                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10692                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10693                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10694                        "set!",
10695                        (uint64_t)(pCB->commandBuffer));
10696                }
10697                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10698                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10699                    skip_call |= log_msg(
10700                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10701                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10702                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
10703                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10704                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10705                        "set, even though it does.",
10706                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10707                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10708                }
10709            }
10710            if (!pCB->activeQueries.empty() && !dev_data->phys_dev_properties.features.inheritedQueries) {
10711                skip_call |=
10712                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10713                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10714                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10715                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
10716                            "flight and inherited queries not "
10717                            "supported on this device.",
10718                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10719            }
10720            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10721            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10722            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10723            for (auto &function : pSubCB->queryUpdates) {
10724                pCB->queryUpdates.push_back(function);
10725            }
10726        }
10727        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
10728        skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10729    }
10730    lock.unlock();
10731    if (!skip_call)
10732        dev_data->device_dispatch_table->CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10733}
10734
10735// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
10736static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
10737                                    VkDeviceSize end_offset) {
10738    bool skip_call = false;
10739    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10740    // Iterate over all bound image ranges and verify that for any that overlap the
10741    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
10742    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
10743    for (auto image_handle : mem_info->bound_images) {
10744        auto img_it = mem_info->bound_ranges.find(image_handle);
10745        if (img_it != mem_info->bound_ranges.end()) {
10746            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
10747                std::vector<VkImageLayout> layouts;
10748                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
10749                    for (auto layout : layouts) {
10750                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10751                            skip_call |=
10752                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10753                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10754                                                                                        "GENERAL or PREINITIALIZED are supported.",
10755                                        string_VkImageLayout(layout));
10756                        }
10757                    }
10758                }
10759            }
10760        }
10761    }
10762    return skip_call;
10763}
10764
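// On vkMapMemory: require a host-visible memory type, verify any bound images overlapping the
// mapped range are in a mappable layout, and record the range so later flush/invalidate calls can
// be validated against it.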
10765VKAPI_ATTR VkResult VKAPI_CALL
10766MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10767    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10768
10769    bool skip_call = false;
10770    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10771    std::unique_lock<std::mutex> lock(global_lock);
10772    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10773    if (mem_info) {
10774        // TODO : This could be more fine-grained to track just the region that is valid
10775        mem_info->global_valid = true;
10776        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10777        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10778        // TODO : Do we need to create new "bound_range" for the mapped range?
10779        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10780        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10781             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
10782            skip_call |=
10783                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10784                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10785                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
10786        }
10787    }
10788    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10789    lock.unlock();
10790
10791    if (!skip_call) {
10792        result = dev_data->device_dispatch_table->MapMemory(device, mem, offset, size, flags, ppData);
10793        if (VK_SUCCESS == result) {
10794            lock.lock();
10795            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10796            storeMemRanges(dev_data, mem, offset, size);
10797            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10798            lock.unlock();
10799        }
10800    }
10801    return result;
10802}
10803
10804VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10805    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10806    bool skip_call = false;
10807
10808    std::unique_lock<std::mutex> lock(global_lock);
10809    skip_call |= deleteMemRanges(my_data, mem);
10810    lock.unlock();
10811    if (!skip_call) {
10812        my_data->device_dispatch_table->UnmapMemory(device, mem);
10813    }
10814}
10815
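// Verify that every flush/invalidate range lies entirely within the range currently mapped on its
// memory object.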
10816static bool validateMemoryIsMapped(layer_data *my_data, const char *funcName, uint32_t memRangeCount,
10817                                   const VkMappedMemoryRange *pMemRanges) {
10818    bool skip_call = false;
10819    for (uint32_t i = 0; i < memRangeCount; ++i) {
10820        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
10821        if (mem_info) {
10822            if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10823                skip_call |=
10824                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10825                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10826                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
10827                            "(" PRINTF_SIZE_T_SPECIFIER ").",
10828                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
10829            }
10830
10831            const uint64_t data_terminus = (mem_info->mem_range.size == VK_WHOLE_SIZE)
10832                                               ? mem_info->alloc_info.allocationSize
10833                                               : (mem_info->mem_range.offset + mem_info->mem_range.size);
10834            if (pMemRanges[i].size != VK_WHOLE_SIZE && (data_terminus < (pMemRanges[i].offset + pMemRanges[i].size))) {
10835                skip_call |= log_msg(
10836                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10837                    (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10838                    "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER ") exceeds the mapped memory range's "
10839                    "upper bound (" PRINTF_SIZE_T_SPECIFIER ").",
10840                    funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(data_terminus));
10841            }
10842        }
10843    }
10844    return skip_call;
10845}
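
// A minimal sketch of the bounds rule validateMemoryIsMapped() enforces, assuming the
// memory was mapped with vkMapMemory(device, memory, 64, 512, 0, &ptr). A flush whose
// offset is below 64, or whose offset + size exceeds 64 + 512, triggers
// MEMTRACK_INVALID_MAP above; the range below stays inside the mapping. Handle names
// are hypothetical.
#if 0
VkMappedMemoryRange range = {};
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.memory = memory;
range.offset = 64;  // not below the mapped offset: passes the first check
range.size = 512;   // offset + size within the mapped terminus: passes the second check
vkFlushMappedMemoryRanges(device, 1, &range);
#endif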
10846
10847static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *my_data, uint32_t memRangeCount,
10848                                                     const VkMappedMemoryRange *pMemRanges) {
10849    bool skip_call = false;
10850    for (uint32_t i = 0; i < memRangeCount; ++i) {
10851        auto mem_info = getMemObjInfo(my_data, pMemRanges[i].memory);
10852        if (mem_info) {
10853            if (mem_info->shadow_copy) {
10854                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10855                                        ? mem_info->mem_range.size
10856                                        : (mem_info->alloc_info.allocationSize - pMemRanges[i].offset);
10857                char *data = static_cast<char *>(mem_info->shadow_copy);
10858                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
10859                    if (data[j] != NoncoherentMemoryFillValue) {
10860                        skip_call |= log_msg(
10861                            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10862                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10863                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10864                    }
10865                }
10866                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
10867                    if (data[j] != NoncoherentMemoryFillValue) {
10868                        skip_call |= log_msg(
10869                            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10870                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10871                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10872                    }
10873                }
10874                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
10875            }
10876        }
10877    }
10878    return skip_call;
10879}
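
// The shadow copy scanned above is laid out as
//   [shadow_pad_size guard bytes][size user bytes][shadow_pad_size guard bytes],
// with every guard byte pre-filled with NoncoherentMemoryFillValue. A guard byte that no
// longer holds the fill value means the app wrote outside its mapped range. A condensed
// sketch of the same scan, assuming that layout:
#if 0
static bool GuardBytesIntact(const char *shadow, VkDeviceSize size, VkDeviceSize pad) {
    for (VkDeviceSize j = 0; j < pad; ++j)                        // leading guard: underflow
        if (shadow[j] != NoncoherentMemoryFillValue) return false;
    for (VkDeviceSize j = pad + size; j < pad + size + pad; ++j)  // trailing guard: overflow
        if (shadow[j] != NoncoherentMemoryFillValue) return false;
    return true;
}
#endif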
10880
10881static void CopyNoncoherentMemoryFromDriver(layer_data *my_data, uint32_t memory_range_count,
10882                                            const VkMappedMemoryRange *mem_ranges) {
10883    for (uint32_t i = 0; i < memory_range_count; ++i) {
10884        auto mem_info = getMemObjInfo(my_data, mem_ranges[i].memory);
10885        if (mem_info && mem_info->shadow_copy) {
10886            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10887                                    ? mem_info->mem_range.size
10888                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
10889            char *data = static_cast<char *>(mem_info->shadow_copy);
10890            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
10891        }
10892    }
10893}
10894
10895 VKAPI_ATTR VkResult VKAPI_CALL
10896FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10897    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10898    bool skip_call = false;
10899    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10900
10901    std::unique_lock<std::mutex> lock(global_lock);
10902    skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(my_data, memRangeCount, pMemRanges);
10903    skip_call |= validateMemoryIsMapped(my_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10904    lock.unlock();
10905    if (!skip_call) {
10906        result = my_data->device_dispatch_table->FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10907    }
10908    return result;
10909}
10910
10911 VKAPI_ATTR VkResult VKAPI_CALL
10912InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10913    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10914    bool skip_call = false;
10915    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10916
10917    std::unique_lock<std::mutex> lock(global_lock);
10918    skip_call |= validateMemoryIsMapped(my_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10919    lock.unlock();
10920    if (!skip_call) {
10921        result = my_data->device_dispatch_table->InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10922        // Update our shadow copy with modified driver data
10923        CopyNoncoherentMemoryFromDriver(my_data, memRangeCount, pMemRanges);
10924    }
10925    return result;
10926}
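
// Illustrative app-side round trip exercising both interceptors above on memory that is
// HOST_VISIBLE but not HOST_COHERENT: flush after host writes, invalidate before host
// reads. "device" and "memory" are assumed valid and currently mapped.
#if 0
VkMappedMemoryRange range = {};
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.memory = memory;
range.offset = 0;
range.size = VK_WHOLE_SIZE;                        // the entire mapped range
vkFlushMappedMemoryRanges(device, 1, &range);      // host writes -> device; shadow copied to driver
// ... queue work that writes the memory, then wait for it ...
vkInvalidateMappedMemoryRanges(device, 1, &range); // device writes -> host; shadow refreshed from driver
#endif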
10927
10928VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10929    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10930    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10931    bool skip_call = false;
10932    std::unique_lock<std::mutex> lock(global_lock);
10933    auto image_node = getImageNode(dev_data, image);
10934    if (image_node) {
10935        // Track objects tied to memory
10936        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
10937        skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10938        VkMemoryRequirements memRequirements;
10939        lock.unlock();
10940        dev_data->device_dispatch_table->GetImageMemoryRequirements(device, image, &memRequirements);
10941        lock.lock();
10942
10943        // Track and validate bound memory range information
10944        auto mem_info = getMemObjInfo(dev_data, mem);
10945        if (mem_info) {
10946            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
10947                                                image_node->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
10948            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
10949        }
10950
10951        print_mem_list(dev_data);
10952        lock.unlock();
10953        if (!skip_call) {
10954            result = dev_data->device_dispatch_table->BindImageMemory(device, image, mem, memoryOffset);
10955            lock.lock();
10956            image_node->mem = mem;
10957            image_node->memOffset = memoryOffset;
10958            image_node->memSize = memRequirements.size;
10959            lock.unlock();
10960        }
10961    } else {
10962        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10963                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
10964                "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been destroyed?",
10965                reinterpret_cast<const uint64_t &>(image));
10966    }
10967    return result;
10968}
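
// Sketch of the allocate/bind sequence validated above. ValidateMemoryTypes() requires
// the chosen memoryTypeIndex to be set in VkMemoryRequirements::memoryTypeBits, so a
// conforming app picks its index along these lines (helper name is hypothetical):
#if 0
static uint32_t PickMemoryType(const VkPhysicalDeviceMemoryProperties &props, uint32_t type_bits,
                               VkMemoryPropertyFlags required) {
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        if ((type_bits & (1u << i)) && (props.memoryTypes[i].propertyFlags & required) == required)
            return i;  // first memory type that is both allowed and has the required properties
    }
    return UINT32_MAX;  // no compatible type
}
// Usage: vkGetImageMemoryRequirements(device, image, &reqs);
//        allocate with PickMemoryType(mem_props, reqs.memoryTypeBits, 0);
//        vkBindImageMemory(device, image, memory, 0 /* memoryOffset */);
#endif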
10969
10970VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
10971    bool skip_call = false;
10972    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10973    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10974    std::unique_lock<std::mutex> lock(global_lock);
10975    auto event_node = getEventNode(dev_data, event);
10976    if (event_node) {
10977        event_node->needsSignaled = false;
10978        event_node->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
10979        if (event_node->write_in_use) {
10980            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
10981                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
10982                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
10983                                 reinterpret_cast<const uint64_t &>(event));
10984        }
10985    }
10986    lock.unlock();
10987    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
10988    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
10989    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
10990    for (auto queue_data : dev_data->queueMap) {
10991        auto event_entry = queue_data.second.eventToStageMap.find(event);
10992        if (event_entry != queue_data.second.eventToStageMap.end()) {
10993            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
10994        }
10995    }
10996    if (!skip_call)
10997        result = dev_data->device_dispatch_table->SetEvent(device, event);
10998    return result;
10999}
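
// Host-to-device handoff pattern that SetEvent() above participates in: a command buffer
// waits on the event with srcStageMask = HOST, and the host signals it later. Handles
// ("cmd", "event") are hypothetical; signaling an event the GPU is *writing* (see the
// write_in_use check above) is the error case.
#if 0
vkCmdWaitEvents(cmd, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                0, nullptr, 0, nullptr, 0, nullptr);
// ... submit cmd ...
vkSetEvent(device, event);  // host signal, visible to all queues immediately
#endif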
11000
11001VKAPI_ATTR VkResult VKAPI_CALL
11002QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
11003    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11004    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11005    bool skip_call = false;
11006    std::unique_lock<std::mutex> lock(global_lock);
11007    auto pFence = getFenceNode(dev_data, fence);
11008    auto pQueue = getQueueNode(dev_data, queue);
11009
11010    // First verify that fence is not in use
11011    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11012
11013    if (pFence) {
11014        SubmitFence(pQueue, pFence, bindInfoCount);
11015    }
11016
11017    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11018        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11019        // Track objects tied to memory
11020        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11021            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11022                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
11023                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11024                                           "vkQueueBindSparse"))
11025                    skip_call = true;
11026            }
11027        }
11028        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11029            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11030                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
11031                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11032                                           "vkQueueBindSparse"))
11033                    skip_call = true;
11034            }
11035        }
11036        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11037            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11038                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
11039                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11040                                           "vkQueueBindSparse"))
11041                    skip_call = true;
11042            }
11043        }
11044
11045        std::vector<SEMAPHORE_WAIT> semaphore_waits;
11046        std::vector<VkSemaphore> semaphore_signals;
11047        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11048            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11049            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11050            if (pSemaphore) {
11051                if (pSemaphore->signaled) {
11052                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11053                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11054                        pSemaphore->in_use.fetch_add(1);
11055                    }
11056                    pSemaphore->signaler.first = VK_NULL_HANDLE;
11057                    pSemaphore->signaled = false;
11058                } else {
11059                    skip_call |=
11060                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11061                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11062                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
11063                                " that has no way to be signaled.",
11064                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11065                }
11066            }
11067        }
11068        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11069            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11070            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11071            if (pSemaphore) {
11072                if (pSemaphore->signaled) {
11073                    skip_call |=
11074                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11075                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11076                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
11077                                ", but that semaphore is already signaled.",
11078                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11079                }
11080                else {
11081                    pSemaphore->signaler.first = queue;
11082                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11083                    pSemaphore->signaled = true;
11084                    pSemaphore->in_use.fetch_add(1);
11085                    semaphore_signals.push_back(semaphore);
11086                }
11087            }
11088        }
11089
11090        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11091                                         semaphore_waits,
11092                                         semaphore_signals,
11093                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11094    }
11095
11096    if (pFence && !bindInfoCount) {
11097        // No work to do, just dropping a fence in the queue by itself.
11098        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11099                                         std::vector<SEMAPHORE_WAIT>(),
11100                                         std::vector<VkSemaphore>(),
11101                                         fence);
11102    }
11103
11104    print_mem_list(dev_data);
11105    lock.unlock();
11106
11107    if (!skip_call)
11108        return dev_data->device_dispatch_table->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11109
11110    return result;
11111}
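
// A minimal sketch of the structures QueueBindSparse() walks above: a single buffer bind
// wrapped in one VkBindSparseInfo. "queue", "buffer", and "memory" are assumed to exist,
// with the buffer created with VK_BUFFER_CREATE_SPARSE_BINDING_BIT.
#if 0
VkSparseMemoryBind bind = {};
bind.resourceOffset = 0;
bind.size = 65536;     // must respect the buffer's sparse block alignment
bind.memory = memory;  // VK_NULL_HANDLE here would unbind the range instead
bind.memoryOffset = 0;

VkSparseBufferMemoryBindInfo buffer_bind = {};
buffer_bind.buffer = buffer;
buffer_bind.bindCount = 1;
buffer_bind.pBinds = &bind;

VkBindSparseInfo bind_info = {};
bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
bind_info.bufferBindCount = 1;
bind_info.pBufferBinds = &buffer_bind;
vkQueueBindSparse(queue, 1, &bind_info, VK_NULL_HANDLE);
#endif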
11112
11113VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11114                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11115    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11116    VkResult result = dev_data->device_dispatch_table->CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11117    if (result == VK_SUCCESS) {
11118        std::lock_guard<std::mutex> lock(global_lock);
11119        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
11120        sNode->signaler.first = VK_NULL_HANDLE;
11121        sNode->signaler.second = 0;
11122        sNode->signaled = false;
11123    }
11124    return result;
11125}
11126
11127VKAPI_ATTR VkResult VKAPI_CALL
11128CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11129    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11130    VkResult result = dev_data->device_dispatch_table->CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11131    if (result == VK_SUCCESS) {
11132        std::lock_guard<std::mutex> lock(global_lock);
11133        dev_data->eventMap[*pEvent].needsSignaled = false;
11134        dev_data->eventMap[*pEvent].write_in_use = 0;
11135        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11136    }
11137    return result;
11138}
11139
11140VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11141                                                  const VkAllocationCallbacks *pAllocator,
11142                                                  VkSwapchainKHR *pSwapchain) {
11143    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11144    VkResult result = dev_data->device_dispatch_table->CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
11145
11146    if (VK_SUCCESS == result) {
11147        std::lock_guard<std::mutex> lock(global_lock);
11148        dev_data->device_extensions.swapchainMap[*pSwapchain] = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo));
11149    }
11150
11151    return result;
11152}
11153
11154VKAPI_ATTR void VKAPI_CALL
11155DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
11156    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11157    bool skip_call = false;
11158
11159    std::unique_lock<std::mutex> lock(global_lock);
11160    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11161    if (swapchain_data) {
11162        if (swapchain_data->images.size() > 0) {
11163            for (auto swapchain_image : swapchain_data->images) {
11164                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11165                if (image_sub != dev_data->imageSubresourceMap.end()) {
11166                    for (auto imgsubpair : image_sub->second) {
11167                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11168                        if (image_item != dev_data->imageLayoutMap.end()) {
11169                            dev_data->imageLayoutMap.erase(image_item);
11170                        }
11171                    }
11172                    dev_data->imageSubresourceMap.erase(image_sub);
11173                }
11174                skip_call |=
11175                    clear_object_binding(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
11176                dev_data->imageMap.erase(swapchain_image);
11177            }
11178        }
11179        dev_data->device_extensions.swapchainMap.erase(swapchain);
11180    }
11181    lock.unlock();
11182    if (!skip_call)
11183        dev_data->device_dispatch_table->DestroySwapchainKHR(device, swapchain, pAllocator);
11184}
11185
11186VKAPI_ATTR VkResult VKAPI_CALL
11187GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
11188    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11189    VkResult result = dev_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11190
11191    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11192        // This should never happen and is checked by param checker.
11193        if (!pCount)
11194            return result;
11195        std::lock_guard<std::mutex> lock(global_lock);
11196        const size_t count = *pCount;
11197        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11198        if (swapchain_node && !swapchain_node->images.empty()) {
11199            // TODO : Not sure I like the memcmp here, but it works
11200            const bool mismatch = (swapchain_node->images.size() != count ||
11201                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
11202            if (mismatch) {
11203                // TODO: Verify against Valid Usage section of extension
11204                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11205                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
11206                        "vkGetSwapchainImagesKHR(0x%" PRIx64
11207                        ") returned image data that does not match the data returned previously",
11208                        (uint64_t)(swapchain));
11209            }
11210        }
11211        for (uint32_t i = 0; swapchain_node && (i < *pCount); ++i) {  // guard: swapchain state may be untracked
11212            IMAGE_LAYOUT_NODE image_layout_node;
11213            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11214            image_layout_node.format = swapchain_node->createInfo.imageFormat;
11215            // Add imageMap entries for each swapchain image
11216            VkImageCreateInfo image_ci = {};
11217            image_ci.mipLevels = 1;
11218            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
11219            image_ci.usage = swapchain_node->createInfo.imageUsage;
11220            image_ci.format = swapchain_node->createInfo.imageFormat;
11221            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11222            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
11223            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
11224            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
11225            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pSwapchainImages[i], &image_ci));
11226            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
11227            image_node->valid = false;
11228            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11229            swapchain_node->images.push_back(pSwapchainImages[i]);
11230            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11231            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11232            dev_data->imageLayoutMap[subpair] = image_layout_node;
11233            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
11234        }
11235    }
11236    return result;
11237}
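
// The usual two-call idiom for the entry point above; the layer caches the image list on
// the first data-returning call and warns if a later call returns different data.
// "device" and "swapchain" are assumed valid.
#if 0
uint32_t count = 0;
vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);        // first call: query count
std::vector<VkImage> images(count);
vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());  // second call: fetch handles
#endif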
11238
11239VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11240    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11241    bool skip_call = false;
11242
11243    std::lock_guard<std::mutex> lock(global_lock);
11244    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11245        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11246        if (pSemaphore && !pSemaphore->signaled) {
11247            skip_call |=
11248                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11249                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11250                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11251                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11252        }
11253    }
11254
11255    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11256        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11257        if (swapchain_data && pPresentInfo->pImageIndices[i] < swapchain_data->images.size()) {
11258            VkImage image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11259            skip_call |= ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, image), "vkQueuePresentKHR()");
11260            vector<VkImageLayout> layouts;
11261            if (FindLayouts(dev_data, image, layouts)) {
11262                for (auto layout : layouts) {
11263                    if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11264                        skip_call |=
11265                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11266                                        reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
11267                                        "Images passed to present must be in layout "
11268                                        "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, but this image is in %s.",
11269                                        string_VkImageLayout(layout));
11270                    }
11271                }
11272            }
11273        }
11274    }
11275
11276    if (skip_call) {
11277        return VK_ERROR_VALIDATION_FAILED_EXT;
11278    }
11279
11280    VkResult result = dev_data->device_dispatch_table->QueuePresentKHR(queue, pPresentInfo);
11281
11282    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11283        // Semaphore waits occur before error generation, if the call reached
11284        // the ICD. (Confirm?)
11285        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11286            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11287            if (pSemaphore) {
11288                pSemaphore->signaler.first = VK_NULL_HANDLE;
11289                pSemaphore->signaled = false;
11290            }
11291        }
11292
11293        // Note: even though presentation is directed to a queue, there is no
11294        // direct ordering between QP and subsequent work, so QP (and its
11295        // semaphore waits) /never/ participate in any completion proof.
11296    }
11297
11298    return result;
11299}
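
// Sketch of the transition the layout check above expects: before presenting, the app
// moves the acquired image to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. "cmd" is assumed to be a
// recording command buffer and "image" the acquired swapchain image.
#if 0
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                     VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier);
#endif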
11300
11301VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11302                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
11303                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11304    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11305    // No layer state is read or written here, so global_lock need not be held across the ICD call.
11306    VkResult result =
11307        dev_data->device_dispatch_table->CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11308    return result;
11309}
11310
11311VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11312                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11313    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11314    bool skip_call = false;
11315
11316    std::unique_lock<std::mutex> lock(global_lock);
11317    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11318    if (pSemaphore && pSemaphore->signaled) {
11319        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11320                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11321                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
11322    }
11323
11324    auto pFence = getFenceNode(dev_data, fence);
11325    if (pFence) {
11326        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11327    }
11328    lock.unlock();
11329
11330    if (skip_call)
11331        return VK_ERROR_VALIDATION_FAILED_EXT;
11332
11333    VkResult result =
11334            dev_data->device_dispatch_table->AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11335
11336    lock.lock();
11337    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11338        if (pFence) {
11339            pFence->state = FENCE_INFLIGHT;
11340            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
11341        }
11342
11343        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11344        if (pSemaphore) {
11345            pSemaphore->signaled = true;
11346            pSemaphore->signaler.first = VK_NULL_HANDLE;
11347        }
11348    }
11349    lock.unlock();
11350
11351    return result;
11352}
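
// Sketch of the semaphore discipline enforced above and in QueuePresentKHR(): acquire
// signals a fresh (unsignaled) semaphore, the render submit waits on it and signals a
// second one, and the present waits on that. All handles are assumed pre-created.
#if 0
uint32_t image_index = 0;
vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_sem, VK_NULL_HANDLE, &image_index);
// ... submit rendering that waits on acquire_sem and signals render_sem ...
VkPresentInfoKHR present = {};
present.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present.waitSemaphoreCount = 1;
present.pWaitSemaphores = &render_sem;
present.swapchainCount = 1;
present.pSwapchains = &swapchain;
present.pImageIndices = &image_index;
vkQueuePresentKHR(queue, &present);
#endif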
11353
11354VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11355                                                        VkPhysicalDevice *pPhysicalDevices) {
11356    bool skip_call = false;
11357    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
11358    if (my_data->instance_state) {
11359        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11360        if (NULL == pPhysicalDevices) {
11361            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11362        } else {
11363            if (UNCALLED == my_data->instance_state->vkEnumeratePhysicalDevicesState) {
11364                // Flag warning here. You can call this without having queried the count, but it may not be
11365                // robust on platforms with multiple physical devices.
11366                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11367                                    0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11368                                    "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
11369                                    "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11370            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11371            else if (my_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) {
11372                // Having actual count match count from app is not a requirement, so this can be a warning
11373                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11374                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11375                                    "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
11376                                    "supported by this instance is %u.",
11377                                    *pPhysicalDeviceCount, my_data->instance_state->physical_devices_count);
11378            }
11379            my_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11380        }
11381        if (skip_call) {
11382            return VK_ERROR_VALIDATION_FAILED_EXT;
11383        }
11384        VkResult result =
11385            my_data->instance_dispatch_table->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11386        if (NULL == pPhysicalDevices) {
11387            my_data->instance_state->physical_devices_count = *pPhysicalDeviceCount;
11388        } else if (result == VK_SUCCESS) { // Save physical devices
11389            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11390                layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(pPhysicalDevices[i]), layer_data_map);
11391                phy_dev_data->physical_device_state = unique_ptr<PHYSICAL_DEVICE_STATE>(new PHYSICAL_DEVICE_STATE());
11392                // Init actual features for each physical device
11393                my_data->instance_dispatch_table->GetPhysicalDeviceFeatures(pPhysicalDevices[i],
11394                                                                            &phy_dev_data->physical_device_features);
11395            }
11396        }
11397        return result;
11398    } else {
11399        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
11400                DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
11401                (uint64_t)instance);
11402    }
11403    return VK_ERROR_VALIDATION_FAILED_EXT;
11404}
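
// The call sequence the UNCALLED -> QUERY_COUNT -> QUERY_DETAILS state machine above is
// designed to see:
#if 0
uint32_t gpu_count = 0;
vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr);      // records QUERY_COUNT
std::vector<VkPhysicalDevice> gpus(gpu_count);
vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data());  // records QUERY_DETAILS
#endif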
11405
11406VKAPI_ATTR void VKAPI_CALL
11407GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11408    VkQueueFamilyProperties *pQueueFamilyProperties) {
11409    bool skip_call = false;
11410    layer_data *phy_dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
11411    if (phy_dev_data->physical_device_state) {
11412        if (NULL == pQueueFamilyProperties) {
11413            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11414        }
11415        else {
11416            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
11417            // get count
11418            if (UNCALLED == phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11419                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11420                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11421                    "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
11422                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
11423                    "NULL pQueueFamilyProperties to query pCount.");
11424            }
11425            // Then verify that pCount that is passed in on second call matches what was returned
11426            if (phy_dev_data->physical_device_state->queueFamilyPropertiesCount != *pCount) {
11427
11428                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
11429                // provide as warning
11430                skip_call |= log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11431                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11432                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
11433                    "supported by this physicalDevice is %u.",
11434                    *pCount, phy_dev_data->physical_device_state->queueFamilyPropertiesCount);
11435            }
11436            phy_dev_data->physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11437        }
11438        if (skip_call) {
11439            return;
11440        }
11441        phy_dev_data->instance_dispatch_table->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount,
11442            pQueueFamilyProperties);
11443        if (NULL == pQueueFamilyProperties) {
11444            phy_dev_data->physical_device_state->queueFamilyPropertiesCount = *pCount;
11445        }
11446        else { // Save queue family properties
11447            phy_dev_data->queue_family_properties.clear(); phy_dev_data->queue_family_properties.reserve(*pCount); // replace, not append, on re-query
11448            for (uint32_t i = 0; i < *pCount; i++) {
11449                phy_dev_data->queue_family_properties.emplace_back(new VkQueueFamilyProperties(pQueueFamilyProperties[i]));
11450            }
11451        }
11452        return;
11453    }
11454    else {
11455        log_msg(phy_dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
11456            __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
11457            "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
11458            (uint64_t)physicalDevice);
11459    }
11460}
11461
11462VKAPI_ATTR VkResult VKAPI_CALL
11463CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11464                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
11465    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
11466    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
11467    VkResult res = pTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11468    if (VK_SUCCESS == res) {
11469        std::lock_guard<std::mutex> lock(global_lock);
11470        res = layer_create_msg_callback(my_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11471    }
11472    return res;
11473}
11474
11475VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
11476                                                         VkDebugReportCallbackEXT msgCallback,
11477                                                         const VkAllocationCallbacks *pAllocator) {
11478    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
11479    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
11480    pTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11481    std::lock_guard<std::mutex> lock(global_lock);
11482    layer_destroy_msg_callback(my_data->report_data, msgCallback, pAllocator);
11483}
11484
11485VKAPI_ATTR void VKAPI_CALL
11486DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
11487                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11488    layer_data *my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
11489    my_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
11490                                                            pMsg);
11491}
11492
11493VKAPI_ATTR VkResult VKAPI_CALL
11494EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11495    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11496}
11497
11498VKAPI_ATTR VkResult VKAPI_CALL
11499EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
11500    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11501}
11502
11503VKAPI_ATTR VkResult VKAPI_CALL
11504EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
11505    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11506        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11507
11508    return VK_ERROR_LAYER_NOT_PRESENT;
11509}
11510
11511VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
11512                                                                  const char *pLayerName, uint32_t *pCount,
11513                                                                  VkExtensionProperties *pProperties) {
11514    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11515        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
11516
11517    assert(physicalDevice);
11518
11519    dispatch_key key = get_dispatch_key(physicalDevice);
11520    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
11521    return my_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
11522}
11523
11524static PFN_vkVoidFunction
11525intercept_core_instance_command(const char *name);
11526
11527static PFN_vkVoidFunction
11528intercept_core_device_command(const char *name);
11529
11530static PFN_vkVoidFunction
11531intercept_khr_swapchain_command(const char *name, VkDevice dev);
11532
11533VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
11534    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
11535    if (proc)
11536        return proc;
11537
11538    assert(dev);
11539
11540    proc = intercept_khr_swapchain_command(funcName, dev);
11541    if (proc)
11542        return proc;
11543
11544    layer_data *dev_data;
11545    dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
11546
11547    VkLayerDispatchTable *pTable = dev_data->device_dispatch_table;
11548    {
11549        if (pTable->GetDeviceProcAddr == NULL)
11550            return NULL;
11551        return pTable->GetDeviceProcAddr(dev, funcName);
11552    }
11553}
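
// How an application reaches the interceptors above: device-level entry points are
// fetched through vkGetDeviceProcAddr, which this layer resolves via its lookup tables
// before falling through to the next layer in the chain. Handles are hypothetical.
#if 0
PFN_vkCreateSwapchainKHR pfnCreateSwapchainKHR =
    reinterpret_cast<PFN_vkCreateSwapchainKHR>(vkGetDeviceProcAddr(device, "vkCreateSwapchainKHR"));
if (pfnCreateSwapchainKHR)
    pfnCreateSwapchainKHR(device, &create_info, nullptr, &swapchain);
#endif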
11554
11555VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
11556    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
11557    if (!proc)
11558        proc = intercept_core_device_command(funcName);
11559    if (!proc)
11560        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
11561    if (proc)
11562        return proc;
11563
11564    assert(instance);
11565
11566    layer_data *my_data;
11567    my_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
11568    proc = debug_report_get_instance_proc_addr(my_data->report_data, funcName);
11569    if (proc)
11570        return proc;
11571
11572    VkLayerInstanceDispatchTable *pTable = my_data->instance_dispatch_table;
11573    if (pTable->GetInstanceProcAddr == NULL)
11574        return NULL;
11575    return pTable->GetInstanceProcAddr(instance, funcName);
11576}
11577
11578static PFN_vkVoidFunction
11579intercept_core_instance_command(const char *name) {
11580    static const struct {
11581        const char *name;
11582        PFN_vkVoidFunction proc;
11583    } core_instance_commands[] = {
11584        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
11585        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
11586        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
11587        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
11588        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
11589        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
11590        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
11591        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
11592        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
11593        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
11594        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
11595    };
11596
11597    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
11598        if (!strcmp(core_instance_commands[i].name, name))
11599            return core_instance_commands[i].proc;
11600    }
11601
11602    return nullptr;
11603}
11604
11605static PFN_vkVoidFunction
11606intercept_core_device_command(const char *name) {
11607    static const struct {
11608        const char *name;
11609        PFN_vkVoidFunction proc;
11610    } core_device_commands[] = {
11611        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
11612        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
11613        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
11614        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
11615        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
11616        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
11617        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
11618        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
11619        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
11620        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
11621        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
11622        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
11623        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
11624        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
11625        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
11626        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
11627        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
11628        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
11629        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
11630        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
11631        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
11632        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
11633        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
11634        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
11635        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
11636        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
11637        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
11638        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
11639        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
11640        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
11641        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
11642        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
11643        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
11644        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
11645        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
11646        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
11647        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
11648        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
11649        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
11650        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
11651        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
11652        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
11653        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
11654        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
11655        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
11656        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
11657        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
11658        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
11659        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
11660        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
11661        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
11662        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
11663        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
11664        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
11665        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
11666        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
11667        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
11668        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
11669        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
11670        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
11671        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
11672        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
11673        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
11674        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
11675        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
11676        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
11677        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
11678        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
11679        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
11680        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
11681        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
11682        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
11683        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
11684        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
11685        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
11686        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
11687        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
11688        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
11689        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
11690        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
11691        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
11692        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
11693        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
11694        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
11695        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
11696        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
11697        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
11698        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
11699        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
11700        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
11701        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
11702        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
11703        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
11704        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
11705        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
11706        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
11707        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
11708        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
11709        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
11710        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
11711        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
11712        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
11713        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
11714        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
11715        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
11716        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
11717        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
11718        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
11719        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
11720        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
11721        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
11722        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
11723        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
11724        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
11725        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
11726        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
11727    };
11728
11729    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
11730        if (!strcmp(core_device_commands[i].name, name))
11731            return core_device_commands[i].proc;
11732    }
11733
11734    return nullptr;
11735}

static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        {"vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR)},
        {"vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR)},
        {"vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR)},
        {"vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR)},
        {"vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR)},
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}
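// When dev is VK_NULL_HANDLE (an instance-level lookup), the extension checks
// above are skipped and the WSI interceptors are reported unconditionally.
//
// For context, a device-level lookup chains these helpers roughly as follows.
// This is an illustrative sketch only: the actual logic lives in
// GetDeviceProcAddr earlier in this file, and the name of the core-table
// helper and the dispatch-table member are assumed here.
//
//     PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
//     if (!proc)
//         proc = intercept_khr_swapchain_command(funcName, dev);
//     if (!proc) {
//         // Not intercepted by this layer: pass the query down the chain.
//         layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
//         proc = dev_data->device_dispatch_table->GetDeviceProcAddr(dev, funcName);
//     }
//     return proc;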

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}
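// Applications typically reach these exports via vkGetInstanceProcAddr.
// Illustrative sketch only; my_callback stands in for a hypothetical
// application-defined PFN_vkDebugReportCallbackEXT:
//
//     auto pfn = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
//         vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT"));
//     VkDebugReportCallbackCreateInfoEXT info = {};
//     info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
//     info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
//     info.pfnCallback = my_callback;
//     VkDebugReportCallbackEXT callback = VK_NULL_HANDLE;
//     pfn(instance, &info, nullptr, &callback);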

// loader-layer interface v0: just wrappers, since this library contains only one layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}
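// Under loader-layer interface v0 the loader calls the two device enumeration
// exports above directly to query this layer's own properties, and is expected
// to pass VK_NULL_HANDLE for the physical device; the asserts document that
// contract.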

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}