core_validation.cpp revision 7286e20c06011d3c6fa7edfbdbadd42bb6e8cc35
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

// Turn on mem_tracker merged code
#define MTMERGESOURCE 1

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)                                                                                                            \
    {                                                                                                                              \
        printf(__VA_ARGS__);                                                                                                       \
        printf("\n");                                                                                                              \
    }
#endif

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods.  A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// Second special memory handle used to flag an object whose previously bound memory has been freed (unbound)
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
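// Together with VK_NULL_HANDLE, these sentinels give binding checks three distinct
// states to decode. A minimal sketch of that decode (illustrative only; the real
// checks live in VerifyBoundMemoryIsValid and ValidateImageMemoryIsValid below):
//
//     if (node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) { /* WSI-owned image */ }
//     else if (node->mem == MEMORY_UNBOUND)  { /* previously bound memory was freed */ }
//     else if (node->mem == VK_NULL_HANDLE)  { /* never bound */ }
//     else                                   { /* bound normally */ }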

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    unique_ptr<INSTANCE_STATE> instance_state = nullptr;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    bool androidSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
    bool mirSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    bool waylandSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
    bool win32SurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
    bool xcbSurfaceExtensionEnabled = false;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
    bool xlibSurfaceExtensionEnabled = false;
#endif
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;
    unique_ptr<INSTANCE_STATE> instance_state = nullptr;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_NODE> queueMap;
    unordered_map<VkEvent, EVENT_NODE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_NODE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
    VkDevice device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[i], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
                       global_layer.layerName);
        }
    }
}
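
// For reference, a correct activation orders this layer before
// VK_LAYER_GOOGLE_unique_objects (illustrative sketch; only the two layer
// names above are assumed):
//
//     const char *layer_names[] = {
//         "VK_LAYER_LUNARG_core_validation",  // must be activated first
//         "VK_LAYER_GOOGLE_unique_objects",
//     };
//     // ...then set enabledLayerCount = 2 and ppEnabledLayerNames = layer_names
//     // on the create info passed to instance/device creation.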

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) { /* x++ */
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() { /* ++x; */
        it += len();
        return *this;
    }

    /* The iterator and the value are the same thing. */
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
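
// Illustrative use of the iterator (a sketch, not called anywhere): each SPIR-V
// instruction stores its word count in the high 16 bits of word 0, so operator++
// simply skips len() words. Dumping every instruction in a shader_module (defined
// below) looks like:
//
//     for (auto insn : module) {
//         LOGCONSOLE("offset %u: opcode %u (%u words)", insn.offset(), insn.opcode(), insn.len());
//     }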

struct shader_module {
    /* the spirv image itself */
    vector<uint32_t> words;
    /* a mapping of <id> to the first word of its def. this is useful because walking type
     * trees, constant expressions, etc requires jumping all over the instruction stream.
     */
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {

        build_def_index(this);
    }

    /* expose begin() / end() to enable range-based for */
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }         /* just past last insn */
    /* given an offset into the module, produce an iterator there. */
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    /* gets an iterator to the definition of an id */
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
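
// Note that begin() starts at word 5, skipping the fixed five-word SPIR-V header
// (magic, version, generator, bound, schema). Illustrative sketch of def_index in
// use: get_def() jumps straight to an <id>'s defining instruction instead of
// rescanning the stream, e.g. chasing a pointer type's pointee:
//
//     auto def = module.get_def(id);
//     if (def != module.end() && def.opcode() == spv::OpTypePointer) {
//         auto pointee = module.get_def(def.word(3));  // OpTypePointer: word 3 is the pointee type <id>
//     }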

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_NODE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to bound memory for given handle of specified type and set sparse param to indicate if binding is sparse
static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type, bool *sparse) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        auto img_node = getImageNode(my_data, VkImage(handle));
        // Check for null before dereferencing to read the sparse flag
        if (img_node) {
            *sparse = img_node->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
            return &img_node->mem;
        }
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(my_data, VkBuffer(handle));
        if (buff_node) {
            *sparse = buff_node->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
            return &buff_node->mem;
        }
        break;
    }
    default:
        break;
    }
    return nullptr;
}
// Overloaded version of above function that doesn't care about sparse bool
static VkDeviceMemory *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool sparse;
    return GetObjectMemBinding(my_data, handle, type, &sparse);
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
//  Verify that (actual & desired) flags != 0 or,
//   if strict is true, verify that (actual & desired) flags == desired
//  In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, char const *ty_str, char const *func_name,
                                 char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                            MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                            " used by %s. In this case, %s should have %s set during creation.",
                            ty_str, obj_handle, func_name, ty_str, usage_str);
    }
    return skip_call;
}
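
// Illustrative call (this mirrors how the image/buffer wrappers below use it):
// flag an error unless a buffer was created with TRANSFER_SRC usage. The function
// name and usage string are just the text that ends up in the log message.
//
//     skip |= validate_usage_flags(dev_data, buffer_node->createInfo.usage,
//                                  VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, handle,
//                                  VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer",
//                                  "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");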

// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
                                    char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
//  an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
                                     char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
//  Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
//  TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_node
//  If mem is special swapchain key, then verify that image_node valid member is true
//  Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_NODE *image_node, const char *functionName) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_node->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_node->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
//  If mem is special swapchain key, then set entire image_node to valid param value
//  Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_NODE *image_node, bool valid) {
    if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_node->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
    SetMemoryValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
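
// Typical pairing for the valid-range helpers above (an illustrative sketch;
// assumes a vkCmdCopyBuffer handler with src/dst BUFFER_NODEs and that the CB's
// validate_functions list holds deferred bool() callbacks run at submit time):
// a read validates the source now, while a write defers marking the destination
// valid until the command buffer is actually submitted.
//
//     skip |= ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
//     cb_node->validate_functions.push_back(
//         [=]() { SetBufferMemoryValid(dev_data, dst_buff_node, true); return false; });
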
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {

        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb);
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (pCBNode) {
                pCBNode->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_NODE *sampler_node) {
    sampler_node->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(sampler_node->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node) {
    // Skip validation if this image was created through WSI
    if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
        if (pMemInfo) {
            pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(img_node->mem);
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        img_node->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_node = getImageNode(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_node) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_node);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
    // First update CB binding in MemObj mini CB list
    DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
    if (pMemInfo) {
        pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
        // Now update CBInfo's Mem reference list
        cb_node->memObjs.insert(buff_node->mem);
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    }
    // Now update cb binding for buffer
    buff_node->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_node = getBufferNode(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_node) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
    if (pCBNode) {
        if (pCBNode->memObjs.size() > 0) {
            for (auto mem : pCBNode->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->command_buffer_bindings.erase(pCBNode->commandBuffer);
                }
            }
            pCBNode->memObjs.clear();
        }
        pCBNode->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

696static bool ReportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
697    bool skip_call = false;
698    size_t cmdBufRefCount = pMemObjInfo->command_buffer_bindings.size();
699    size_t objRefCount = pMemObjInfo->obj_bindings.size();
700
701    if ((pMemObjInfo->command_buffer_bindings.size()) != 0) {
702        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
703                            (uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
704                            "Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
705                            " references",
706                            (uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
707    }
708
709    if (cmdBufRefCount > 0 && pMemObjInfo->command_buffer_bindings.size() > 0) {
710        for (auto cb : pMemObjInfo->command_buffer_bindings) {
711            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
712                    (uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
713                    "Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
714        }
715        // Clear the list of hanging references
716        pMemObjInfo->command_buffer_bindings.clear();
717    }
718
719    if (objRefCount > 0 && pMemObjInfo->obj_bindings.size() > 0) {
720        for (auto obj : pMemObjInfo->obj_bindings) {
721            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
722                    MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
723                    obj.handle, (uint64_t)pMemObjInfo->mem);
724            // Clear mem binding for bound objects
725            switch (obj.type) {
726            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
727                auto image_node = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
728                assert(image_node); // Any destroyed images should already be removed from bindings
729                image_node->mem = MEMORY_UNBOUND;
730                break;
731            }
732            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
733                auto buff_node = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
734                assert(buff_node); // Any destroyed buffers should already be removed from bindings
735                buff_node->mem = MEMORY_UNBOUND;
736                break;
737            }
738            default:
739                // Should only have buffer or image objects bound to memory
740                assert(0);
741            }
742        }
743        // Clear the list of hanging references
744        pMemObjInfo->obj_bindings.clear();
745    }
746    return skip_call;
747}

static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
    bool skip_call = false;
    // Parse global list to find info w/ mem
    DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
    if (pInfo) {
        // TODO: Verify against Valid Use section
        // Clear any CB bindings for completed CBs
        //   TODO : Is there a better place to do this?

        assert(pInfo->object != VK_NULL_HANDLE);
        // clear_cmd_buf_and_mem_references removes elements from
        // pInfo->command_buffer_bindings -- this copy not needed in c++14,
        // and probably not needed in practice in c++11
        auto bindings = pInfo->command_buffer_bindings;
        for (auto cb : bindings) {
            if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
                clear_cmd_buf_and_mem_references(dev_data, cb);
            }
        }
        // Now check for any remaining references to this mem obj and remove bindings
        if (pInfo->command_buffer_bindings.size() || pInfo->obj_bindings.size()) {
            skip_call |= ReportMemReferencesAndCleanUp(dev_data, pInfo);
        }
        // Delete mem obj info
        dev_data->memObjMap.erase(dev_data->memObjMap.find(mem));
    } else if (VK_NULL_HANDLE != mem) {
        // The request is to free an invalid, non-zero handle
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
                            "Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map",
                            reinterpret_cast<uint64_t &>(mem));
    }
    return skip_call;
}

// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    // TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
    bool skip_call = false;
    VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
    if (pMemBinding) {
        DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
        // TODO : Make sure this is a reasonable way to reset mem binding
        *pMemBinding = VK_NULL_HANDLE;
        if (pMemObjInfo) {
            // This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
            // and set the object's memory binding pointer to NULL.
            if (!pMemObjInfo->obj_bindings.erase({handle, type})) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
                                   ", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
                            object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
            }
        }
    }
    return skip_call;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory().",
                         api_name, type_name, handle, type_name);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation.",
                         api_name, type_name, handle);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_NODE *image_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<const uint64_t &>(image_node->image),
                                          api_name, "Image");
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<const uint64_t &>(buffer_node->buffer),
                                          api_name, "Buffer");
    }
    return result;
}

// For NULL mem case, output warning
// Make sure given object is in global object map
//  IF a previous binding existed, output validation error
//  Otherwise, add reference from objectInfo to memoryInfo
//  Add reference off of objInfo
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
                            "MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
    } else {
        bool sparse = false;
        VkDeviceMemory *mem_binding = GetObjectMemBinding(dev_data, handle, type, &sparse);
        assert(mem_binding);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, *mem_binding);
            if (prev_binding) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if ((*mem_binding == MEMORY_UNBOUND) && (!sparse)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_node = getImageNode(dev_data, VkImage(handle));
                    if (image_node) {
                        VkImageCreateInfo ici = image_node->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO::  More memory state transition stuff.
                        }
                    }
                }
                *mem_binding = mem;
            }
        }
    }
    return skip_call;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
//  IF a previous binding existed, update binding
//  Add reference from objectInfo to memoryInfo
//  Add reference off of object's binding info
// Return true if a validation error was reported (caller should skip the call), false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
                                   VkDebugReportObjectTypeEXT type, const char *apiName) {
    bool skip_call = false;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (mem == VK_NULL_HANDLE) {
        skip_call = clear_object_binding(dev_data, handle, type);
    } else {
        VkDeviceMemory *pMemBinding = GetObjectMemBinding(dev_data, handle, type);
        assert(pMemBinding);
        DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
        if (pInfo) {
            pInfo->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            *pMemBinding = mem;
        }
    }
    return skip_call;
}

// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
    bool skip_call = false;
    *mem = VK_NULL_HANDLE;
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        // Guard against unknown handles; *mem stays VK_NULL_HANDLE and the error below fires
        auto img_node = getImageNode(dev_data, VkImage(handle));
        if (img_node)
            *mem = img_node->mem;
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        auto buff_node = getBufferNode(dev_data, VkBuffer(handle));
        if (buff_node)
            *mem = buff_node->mem;
        break;
    }
    default:
        assert(0);
    }
    if (!*mem) {
        skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
                            "MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
                                   " but binding is NULL. Has memory been bound to this object?",
                            object_type_to_string(type), handle);
    }
    return skip_call;
}

// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
    // Early out if info is not requested
    if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    // Just printing each msg individually for now, may want to package these into single large print
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            dev_data->memObjMap.size());
    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "=============================");

    if (dev_data->memObjMap.empty())
        return;

    for (auto &entry : dev_data->memObjMap) {
        auto mem_info = entry.second.get();

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    ===MemObjInfo at 0x%p===", (void *)mem_info);
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    Ref Count: " PRINTF_SIZE_T_SPECIFIER,
                mem_info->command_buffer_bindings.size() + mem_info->obj_bindings.size());
        if (0 != mem_info->alloc_info.allocationSize) {
            string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO):         ");
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
        } else {
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "    Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
                mem_info->obj_bindings.size());
        if (mem_info->obj_bindings.size() > 0) {
            for (auto obj : mem_info->obj_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "       VK OBJECT 0x%" PRIx64, obj.handle);
            }
        }

        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM",
                "    VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
                mem_info->command_buffer_bindings.size());
        if (mem_info->command_buffer_bindings.size() > 0) {
            for (auto cb : mem_info->command_buffer_bindings) {
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                        0, __LINE__, MEMTRACK_NONE, "MEM", "      VK CB 0x%p", cb);
            }
        }
    }
}

static void printCBList(layer_data *my_data) {
    GLOBAL_CB_NODE *pCBInfo = NULL;

    // Early out if info is not requested
    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
        return;
    }

    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
            my_data->commandBufferMap.size());
    log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
            MEMTRACK_NONE, "MEM", "==================");

    if (my_data->commandBufferMap.empty())
        return;

    for (auto &cb_node : my_data->commandBufferMap) {
        pCBInfo = cb_node.second;

        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                __LINE__, MEMTRACK_NONE, "MEM", "    CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);

        if (pCBInfo->memObjs.empty())
            continue;
        for (auto obj : pCBInfo->memObjs) {
            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
                    __LINE__, MEMTRACK_NONE, "MEM", "      Mem obj 0x%" PRIx64, (uint64_t)obj);
        }
    }
}

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
        return "CMD_CLEARATTACHMENTS";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}
1144
1145// SPIRV utility functions
1146static void build_def_index(shader_module *module) {
1147    for (auto insn : *module) {
1148        switch (insn.opcode()) {
1149        /* Types */
1150        case spv::OpTypeVoid:
1151        case spv::OpTypeBool:
1152        case spv::OpTypeInt:
1153        case spv::OpTypeFloat:
1154        case spv::OpTypeVector:
1155        case spv::OpTypeMatrix:
1156        case spv::OpTypeImage:
1157        case spv::OpTypeSampler:
1158        case spv::OpTypeSampledImage:
1159        case spv::OpTypeArray:
1160        case spv::OpTypeRuntimeArray:
1161        case spv::OpTypeStruct:
1162        case spv::OpTypeOpaque:
1163        case spv::OpTypePointer:
1164        case spv::OpTypeFunction:
1165        case spv::OpTypeEvent:
1166        case spv::OpTypeDeviceEvent:
1167        case spv::OpTypeReserveId:
1168        case spv::OpTypeQueue:
1169        case spv::OpTypePipe:
1170            module->def_index[insn.word(1)] = insn.offset();
1171            break;
1172
1173        /* Fixed constants */
1174        case spv::OpConstantTrue:
1175        case spv::OpConstantFalse:
1176        case spv::OpConstant:
1177        case spv::OpConstantComposite:
1178        case spv::OpConstantSampler:
1179        case spv::OpConstantNull:
1180            module->def_index[insn.word(2)] = insn.offset();
1181            break;
1182
1183        /* Specialization constants */
1184        case spv::OpSpecConstantTrue:
1185        case spv::OpSpecConstantFalse:
1186        case spv::OpSpecConstant:
1187        case spv::OpSpecConstantComposite:
1188        case spv::OpSpecConstantOp:
1189            module->def_index[insn.word(2)] = insn.offset();
1190            break;
1191
1192        /* Variables */
1193        case spv::OpVariable:
1194            module->def_index[insn.word(2)] = insn.offset();
1195            break;
1196
1197        /* Functions */
1198        case spv::OpFunction:
1199            module->def_index[insn.word(2)] = insn.offset();
1200            break;
1201
1202        default:
1203            /* We don't care about any other defs for now. */
1204            break;
1205        }
1206    }
1207}
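// Illustrative note: once build_def_index() has run, any result id can be
// resolved to its defining instruction in O(1). A sketch, using this file's
// shader_module interface:
//     auto insn = module->get_def(id);
//     if (insn != module->end()) { /* inspect insn.opcode(), insn.word(n) */ }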
1208
1209static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
1210    for (auto insn : *src) {
1211        if (insn.opcode() == spv::OpEntryPoint) {
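            // OpEntryPoint operand layout: word(1) = execution model,
            // word(2) = entry point id, word(3..) = literal name string.
            // The SPIR-V execution models for the core stages (Vertex=0 ..
            // GLCompute=5) line up with the bit positions of the matching
            // VkShaderStageFlagBits, so 1u << executionModel yields the
            // corresponding stage bit.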
1212            auto entrypointName = (char const *)&insn.word(3);
1213            auto entrypointStageBits = 1u << insn.word(1);
1214
1215            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
1216                return insn;
1217            }
1218        }
1219    }
1220
1221    return src->end();
1222}
1223
1224static char const *storage_class_name(unsigned sc) {
1225    switch (sc) {
1226    case spv::StorageClassInput:
1227        return "input";
1228    case spv::StorageClassOutput:
1229        return "output";
1230    case spv::StorageClassUniformConstant:
1231        return "const uniform";
1232    case spv::StorageClassUniform:
1233        return "uniform";
1234    case spv::StorageClassWorkgroup:
1235        return "workgroup local";
1236    case spv::StorageClassCrossWorkgroup:
1237        return "workgroup global";
1238    case spv::StorageClassPrivate:
1239        return "private global";
1240    case spv::StorageClassFunction:
1241        return "function";
1242    case spv::StorageClassGeneric:
1243        return "generic";
1244    case spv::StorageClassAtomicCounter:
1245        return "atomic counter";
1246    case spv::StorageClassImage:
1247        return "image";
1248    case spv::StorageClassPushConstant:
1249        return "push constant";
1250    default:
1251        return "unknown";
1252    }
1253}
1254
1255/* get the value of an integral constant */
1256unsigned get_constant_value(shader_module const *src, unsigned id) {
1257    auto value = src->get_def(id);
1258    assert(value != src->end());
1259
1260    if (value.opcode() != spv::OpConstant) {
1261        /* TODO: Either ensure that the specialization transform is already performed on a module we're
1262         * considering here, OR -- specialize on the fly now.
1263         */
1264        return 1;
1265    }
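    /* OpConstant layout for reference: word(1) = result type, word(2) =
     * result id, word(3..) = literal value words. word(3) below is therefore
     * the low-order 32 bits of the value, which is sufficient for the 32-bit
     * integral constants (array sizes, etc.) this helper is asked about. */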
1266
1267    return value.word(3);
1268}
1269
1270
1271static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
1272    auto insn = src->get_def(type);
1273    assert(insn != src->end());
1274
1275    switch (insn.opcode()) {
1276    case spv::OpTypeBool:
1277        ss << "bool";
1278        break;
1279    case spv::OpTypeInt:
1280        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
1281        break;
1282    case spv::OpTypeFloat:
1283        ss << "float" << insn.word(2);
1284        break;
1285    case spv::OpTypeVector:
1286        ss << "vec" << insn.word(3) << " of ";
1287        describe_type_inner(ss, src, insn.word(2));
1288        break;
1289    case spv::OpTypeMatrix:
1290        ss << "mat" << insn.word(3) << " of ";
1291        describe_type_inner(ss, src, insn.word(2));
1292        break;
1293    case spv::OpTypeArray:
1294        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
1295        describe_type_inner(ss, src, insn.word(2));
1296        break;
1297    case spv::OpTypePointer:
1298        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
1299        describe_type_inner(ss, src, insn.word(3));
1300        break;
1301    case spv::OpTypeStruct: {
1302        ss << "struct of (";
1303        for (unsigned i = 2; i < insn.len(); i++) {
1304            describe_type_inner(ss, src, insn.word(i));
1305            if (i == insn.len() - 1) {
1306                ss << ")";
1307            } else {
1308                ss << ", ";
1309            }
1310        }
1311        break;
1312    }
1313    case spv::OpTypeSampler:
1314        ss << "sampler";
1315        break;
1316    case spv::OpTypeSampledImage:
1317        ss << "sampler+";
1318        describe_type_inner(ss, src, insn.word(2));
1319        break;
1320    case spv::OpTypeImage:
1321        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
1322        break;
1323    default:
1324        ss << "oddtype";
1325        break;
1326    }
1327}
1328
1329
1330static std::string describe_type(shader_module const *src, unsigned type) {
1331    std::ostringstream ss;
1332    describe_type_inner(ss, src, type);
1333    return ss.str();
1334}
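/* Illustrative example: for a uniform mat4 variable, describe_type() on the
 * variable's type id produces "ptr to uniform mat4 of vec4 of float32". */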
1335
1336
1337static bool is_narrow_numeric_type(spirv_inst_iter type) {
1339    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
1340        return false;
1341    return type.word(2) < 64;
1342}
1343
1344
1345static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
1346    /* walk two type trees together, and complain about differences */
1347    auto a_insn = a->get_def(a_type);
1348    auto b_insn = b->get_def(b_type);
1349    assert(a_insn != a->end());
1350    assert(b_insn != b->end());
1351
1352    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
1353        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
1354    }
1355
1356    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
1357        /* we probably just found the extra level of arrayness in b_type: compare the type inside it to a_type */
1358        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
1359    }
1360
1361    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
1362        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
1363    }
1364
1365    if (a_insn.opcode() != b_insn.opcode()) {
1366        return false;
1367    }
1368
1369    if (a_insn.opcode() == spv::OpTypePointer) {
1370        /* match on pointee type. storage class is expected to differ */
1371        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
1372    }
1373
1374    if (a_arrayed || b_arrayed) {
1375        /* if we haven't resolved array-of-verts by here, we're not going to. */
1376        return false;
1377    }
1378
1379    switch (a_insn.opcode()) {
1380    case spv::OpTypeBool:
1381        return true;
1382    case spv::OpTypeInt:
1383        /* match on width, signedness */
1384        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
1385    case spv::OpTypeFloat:
1386        /* match on width */
1387        return a_insn.word(2) == b_insn.word(2);
1388    case spv::OpTypeVector:
1389        /* match on element type, count. */
1390        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
1391            return false;
1392        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
1393            return a_insn.word(3) >= b_insn.word(3);
1394        } else {
1396            return a_insn.word(3) == b_insn.word(3);
1397        }
1398    case spv::OpTypeMatrix:
1399        /* match on element type, count. */
1400        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
1401    case spv::OpTypeArray:
1402        /* match on element type, count. these all have the same layout. we don't get here if
1403         * b_arrayed. This differs from vector & matrix types in that the array size is the id of a constant instruction,
1404         * not a literal within OpTypeArray */
1405        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
1406               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
1407    case spv::OpTypeStruct:
1408        /* match on all element types */
1409        {
1410            if (a_insn.len() != b_insn.len()) {
1411                return false; /* structs cannot match if member counts differ */
1412            }
1413
1414            for (unsigned i = 2; i < a_insn.len(); i++) {
1415                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
1416                    return false;
1417                }
1418            }
1419
1420            return true;
1421        }
1422    default:
1423        /* remaining types are CLisms, or may not appear in the interfaces we
1424         * are interested in. Just claim no match.
1425         */
1426        return false;
1427    }
1428}
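/* Illustrative examples of the relaxed rules above: a producer output of
 * "vec4 of float32" satisfies a consumer input of "vec2 of float32" or a bare
 * "float32", since a consumer may read fewer components than were written;
 * with relaxed == false the component counts must match exactly. */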
1429
1430static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
1431    auto it = map.find(id);
1432    if (it == map.end())
1433        return def;
1434    else
1435        return it->second;
1436}
1437
1438static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
1439    auto insn = src->get_def(type);
1440    assert(insn != src->end());
1441
1442    switch (insn.opcode()) {
1443    case spv::OpTypePointer:
1444        /* see through the ptr -- this is only ever at the toplevel for graphics shaders;
1445         * we're never actually passing pointers around. */
1446        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
1447    case spv::OpTypeArray:
1448        if (strip_array_level) {
1449            return get_locations_consumed_by_type(src, insn.word(2), false);
1450        } else {
1451            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
1452        }
1453    case spv::OpTypeMatrix:
1454        /* num locations is the dimension * element size */
1455        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
1456    case spv::OpTypeVector: {
1457        auto scalar_type = src->get_def(insn.word(2));
1458        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
1459            scalar_type.word(2) : 32;
1460
1461        /* locations are 128-bit wide; 3- and 4-component vectors of 64 bit
1462         * types require two. */
1463        return (bit_width * insn.word(3) + 127) / 128;
1464    }
1465    default:
1466        /* everything else is just 1. */
1467        return 1;
1468
1469        /* TODO: extend to handle 64bit scalar types, whose vectors may need
1470         * multiple locations. */
1471    }
1472}
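/* Worked examples for the above (illustrative): a vec4 of float32 consumes
 * (32 * 4 + 127) / 128 = 1 location; a dvec4 (vec4 of float64) consumes 2;
 * an array of three mat2s consumes 3 * 2 = 6 locations. */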
1473
1474static unsigned get_locations_consumed_by_format(VkFormat format) {
1475    switch (format) {
1476    case VK_FORMAT_R64G64B64A64_SFLOAT:
1477    case VK_FORMAT_R64G64B64A64_SINT:
1478    case VK_FORMAT_R64G64B64A64_UINT:
1479    case VK_FORMAT_R64G64B64_SFLOAT:
1480    case VK_FORMAT_R64G64B64_SINT:
1481    case VK_FORMAT_R64G64B64_UINT:
1482        return 2;
1483    default:
1484        return 1;
1485    }
1486}
1487
1488typedef std::pair<unsigned, unsigned> location_t;
1489typedef std::pair<unsigned, unsigned> descriptor_slot_t;
1490
1491struct interface_var {
1492    uint32_t id;
1493    uint32_t type_id;
1494    uint32_t offset;
1495    bool is_patch;
1496    bool is_block_member;
1497    /* TODO: collect the name, too? Isn't required to be present. */
1498};
1499
1500struct shader_stage_attributes {
1501    char const *const name;
1502    bool arrayed_input;
1503    bool arrayed_output;
1504};
1505
1506static shader_stage_attributes shader_stage_attribs[] = {
1507    {"vertex shader", false, false},
1508    {"tessellation control shader", true, true},
1509    {"tessellation evaluation shader", true, false},
1510    {"geometry shader", true, false},
1511    {"fragment shader", false, false},
1512};
1513
1514static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
1515    while (true) {
1516
1517        if (def.opcode() == spv::OpTypePointer) {
1518            def = src->get_def(def.word(3));
1519        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
1520            def = src->get_def(def.word(2));
1521            is_array_of_verts = false;
1522        } else if (def.opcode() == spv::OpTypeStruct) {
1523            return def;
1524        } else {
1525            return src->end();
1526        }
1527    }
1528}
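/* e.g. for a geometry shader input block, get_struct_type() peels the def
 * chain pointer -> array (one element per vertex) -> struct down to the
 * struct itself; anything that does not bottom out at an OpTypeStruct is
 * reported as src->end(). */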
1529
1530static void collect_interface_block_members(shader_module const *src,
1531                                            std::map<location_t, interface_var> *out,
1532                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
1533                                            uint32_t id, uint32_t type_id, bool is_patch) {
1534    /* Walk down the type_id presented, trying to determine whether it's actually an interface block. */
1535    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
1536    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
1537        /* this isn't an interface block. */
1538        return;
1539    }
1540
1541    std::unordered_map<unsigned, unsigned> member_components;
1542
1543    /* Walk all the OpMemberDecorate for type's result id -- first pass, collect components. */
1544    for (auto insn : *src) {
1545        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1546            unsigned member_index = insn.word(2);
1547
1548            if (insn.word(3) == spv::DecorationComponent) {
1549                unsigned component = insn.word(4);
1550                member_components[member_index] = component;
1551            }
1552        }
1553    }
1554
1555    /* Second pass -- produce the output, from Location decorations */
1556    for (auto insn : *src) {
1557        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
1558            unsigned member_index = insn.word(2);
1559            unsigned member_type_id = type.word(2 + member_index);
1560
1561            if (insn.word(3) == spv::DecorationLocation) {
1562                unsigned location = insn.word(4);
1563                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
1564                auto component_it = member_components.find(member_index);
1565                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
1566
1567                for (unsigned int offset = 0; offset < num_locations; offset++) {
1568                    interface_var v;
1569                    v.id = id;
1570                    /* TODO: member index in interface_var too? */
1571                    v.type_id = member_type_id;
1572                    v.offset = offset;
1573                    v.is_patch = is_patch;
1574                    v.is_block_member = true;
1575                    (*out)[std::make_pair(location + offset, component)] = v;
1576                }
1577            }
1578        }
1579    }
1580}
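/* Illustrative example: a Block-decorated struct member declared as
 * "layout(location = 2) vec4 uv[2];" contributes entries at locations 2 and
 * 3, each flagged is_block_member = true. */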
1581
1582static std::map<location_t, interface_var> collect_interface_by_location(
1583        shader_module const *src, spirv_inst_iter entrypoint,
1584        spv::StorageClass sinterface, bool is_array_of_verts) {
1585
1586    std::unordered_map<unsigned, unsigned> var_locations;
1587    std::unordered_map<unsigned, unsigned> var_builtins;
1588    std::unordered_map<unsigned, unsigned> var_components;
1589    std::unordered_map<unsigned, unsigned> blocks;
1590    std::unordered_map<unsigned, unsigned> var_patch;
1591
1592    for (auto insn : *src) {
1593
1594        /* We consider two interface models: SSO rendezvous-by-location, and
1595         * builtins. Complain about anything that fits neither model.
1596         */
1597        if (insn.opcode() == spv::OpDecorate) {
1598            if (insn.word(2) == spv::DecorationLocation) {
1599                var_locations[insn.word(1)] = insn.word(3);
1600            }
1601
1602            if (insn.word(2) == spv::DecorationBuiltIn) {
1603                var_builtins[insn.word(1)] = insn.word(3);
1604            }
1605
1606            if (insn.word(2) == spv::DecorationComponent) {
1607                var_components[insn.word(1)] = insn.word(3);
1608            }
1609
1610            if (insn.word(2) == spv::DecorationBlock) {
1611                blocks[insn.word(1)] = 1;
1612            }
1613
1614            if (insn.word(2) == spv::DecorationPatch) {
1615                var_patch[insn.word(1)] = 1;
1616            }
1617        }
1618    }
1619
1620    /* TODO: handle grouped decorations */
1621    /* TODO: handle index=1 dual source outputs from FS -- two vars will
1622     * have the same location, and we DON'T want to clobber. */
1623
1624    /* find the end of the entrypoint's name string. additional zero bytes follow the actual null
1625       terminator, to fill out the rest of the word - so we only need to look at the last byte in
1626       the word to determine which word contains the terminator. */
1627    uint32_t word = 3;
1628    while (entrypoint.word(word) & 0xff000000u) {
1629        ++word;
1630    }
1631    ++word;
1632
1633    std::map<location_t, interface_var> out;
1634
1635    for (; word < entrypoint.len(); word++) {
1636        auto insn = src->get_def(entrypoint.word(word));
1637        assert(insn != src->end());
1638        assert(insn.opcode() == spv::OpVariable);
1639
1640        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
1641            unsigned id = insn.word(2);
1642            unsigned type = insn.word(1);
1643
1644            int location = value_or_default(var_locations, id, -1);
1645            int builtin = value_or_default(var_builtins, id, -1);
1646            unsigned component = value_or_default(var_components, id, 0); /* unspecified component is OK: defaults to 0 */
1647            bool is_patch = var_patch.find(id) != var_patch.end();
1648
1649            /* All variables and interface block members in the Input or Output storage classes
1650             * must be decorated with either a builtin or an explicit location.
1651             *
1652             * TODO: integrate the interface block support here. For now, don't complain --
1653             * a valid SPIRV module will only hit this path for the interface block case, as the
1654             * individual members of the type are decorated, rather than variable declarations.
1655             */
1656
1657            if (location != -1) {
1658                /* A user-defined interface variable, with a location. Where a variable
1659                 * occupied multiple locations, emit one result for each. */
1660                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
1661                for (unsigned int offset = 0; offset < num_locations; offset++) {
1662                    interface_var v;
1663                    v.id = id;
1664                    v.type_id = type;
1665                    v.offset = offset;
1666                    v.is_patch = is_patch;
1667                    v.is_block_member = false;
1668                    out[std::make_pair(location + offset, component)] = v;
1669                }
1670            } else if (builtin == -1) {
1671                /* An interface block instance */
1672                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
1673            }
1674        }
1675    }
1676
1677    return out;
1678}
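/* The map returned above is ordered by (location, component) -- e.g. an
 * "out vec4 color" at location 0 appears as the entry keyed (0, 0) -- which
 * lets callers walk a producer and a consumer interface in lockstep. */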
1679
1680static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
1681        debug_report_data *report_data, shader_module const *src,
1682        std::unordered_set<uint32_t> const &accessible_ids) {
1683
1684    std::vector<std::pair<uint32_t, interface_var>> out;
1685
1686    for (auto insn : *src) {
1687        if (insn.opcode() == spv::OpDecorate) {
1688            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
1689                auto attachment_index = insn.word(3);
1690                auto id = insn.word(1);
1691
1692                if (accessible_ids.count(id)) {
1693                    auto def = src->get_def(id);
1694                    assert(def != src->end());
1695
1696                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
1697                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
1698                        for (unsigned int offset = 0; offset < num_locations; offset++) {
1699                            interface_var v;
1700                            v.id = id;
1701                            v.type_id = def.word(1);
1702                            v.offset = offset;
1703                            v.is_patch = false;
1704                            v.is_block_member = false;
1705                            out.emplace_back(attachment_index + offset, v);
1706                        }
1707                    }
1708                }
1709            }
1710        }
1711    }
1712
1713    return out;
1714}
1715
1716static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
1717        debug_report_data *report_data, shader_module const *src,
1718        std::unordered_set<uint32_t> const &accessible_ids) {
1719
1720    std::unordered_map<unsigned, unsigned> var_sets;
1721    std::unordered_map<unsigned, unsigned> var_bindings;
1722
1723    for (auto insn : *src) {
1724        /* All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
1725         * DecorationDescriptorSet and DecorationBinding.
1726         */
1727        if (insn.opcode() == spv::OpDecorate) {
1728            if (insn.word(2) == spv::DecorationDescriptorSet) {
1729                var_sets[insn.word(1)] = insn.word(3);
1730            }
1731
1732            if (insn.word(2) == spv::DecorationBinding) {
1733                var_bindings[insn.word(1)] = insn.word(3);
1734            }
1735        }
1736    }
1737
1738    std::vector<std::pair<descriptor_slot_t, interface_var>> out;
1739
1740    for (auto id : accessible_ids) {
1741        auto insn = src->get_def(id);
1742        assert(insn != src->end());
1743
1744        if (insn.opcode() == spv::OpVariable &&
1745            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
1746            unsigned set = value_or_default(var_sets, insn.word(2), 0);
1747            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);
1748
1749            interface_var v;
1750            v.id = insn.word(2);
1751            v.type_id = insn.word(1);
1752            v.offset = 0;
1753            v.is_patch = false;
1754            v.is_block_member = false;
1755            out.emplace_back(std::make_pair(set, binding), v);
1756        }
1757    }
1758
1759    return out;
1760}
1761
1762static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
1763                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
1764                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
1765                                              shader_stage_attributes const *consumer_stage) {
1766    bool pass = true;
1767
1768    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
1769    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);
1770
1771    auto a_it = outputs.begin();
1772    auto b_it = inputs.begin();
1773
1774    /* maps sorted by key (location); walk them together to find mismatches */
1775    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() > 0 && b_it != inputs.end())) {
1776        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
1777        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
1778        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
1779        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;
1780
1781        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
1782            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1783                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1784                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
1785                        a_first.second, consumer_stage->name)) {
1786                pass = false;
1787            }
1788            a_it++;
1789        } else if (a_at_end || a_first > b_first) {
1790            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1791                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
1792                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
1793                        producer_stage->name)) {
1794                pass = false;
1795            }
1796            b_it++;
1797        } else {
1798            // subtleties of arrayed interfaces:
1799            // - if is_patch, then the member is not arrayed, even though the interface may be.
1800            // - if is_block_member, then the extra array level of an arrayed interface is not
1801            //   expressed in the member type -- it's expressed in the block type.
1802            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
1803                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
1804                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
1805                             true)) {
1806                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1807                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
1808                            a_first.first, a_first.second,
1809                            describe_type(producer, a_it->second.type_id).c_str(),
1810                            describe_type(consumer, b_it->second.type_id).c_str())) {
1811                    pass = false;
1812                }
1813            }
1814            if (a_it->second.is_patch != b_it->second.is_patch) {
1815                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1816                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1817                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
1818                            "per-%s in %s stage", a_first.first, a_first.second,
1819                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
1820                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
1821                    pass = false;
1822                }
1823            }
1824            a_it++;
1825            b_it++;
1826        }
1827    }
1828
1829    return pass;
1830}
1831
1832enum FORMAT_TYPE {
1833    FORMAT_TYPE_UNDEFINED,
1834    FORMAT_TYPE_FLOAT, /* UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader */
1835    FORMAT_TYPE_SINT,
1836    FORMAT_TYPE_UINT,
1837};
1838
1839static unsigned get_format_type(VkFormat fmt) {
1840    switch (fmt) {
1841    case VK_FORMAT_UNDEFINED:
1842        return FORMAT_TYPE_UNDEFINED;
1843    case VK_FORMAT_R8_SINT:
1844    case VK_FORMAT_R8G8_SINT:
1845    case VK_FORMAT_R8G8B8_SINT:
1846    case VK_FORMAT_R8G8B8A8_SINT:
1847    case VK_FORMAT_R16_SINT:
1848    case VK_FORMAT_R16G16_SINT:
1849    case VK_FORMAT_R16G16B16_SINT:
1850    case VK_FORMAT_R16G16B16A16_SINT:
1851    case VK_FORMAT_R32_SINT:
1852    case VK_FORMAT_R32G32_SINT:
1853    case VK_FORMAT_R32G32B32_SINT:
1854    case VK_FORMAT_R32G32B32A32_SINT:
1855    case VK_FORMAT_R64_SINT:
1856    case VK_FORMAT_R64G64_SINT:
1857    case VK_FORMAT_R64G64B64_SINT:
1858    case VK_FORMAT_R64G64B64A64_SINT:
1859    case VK_FORMAT_B8G8R8_SINT:
1860    case VK_FORMAT_B8G8R8A8_SINT:
1861    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
1862    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
1863    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
1864        return FORMAT_TYPE_SINT;
1865    case VK_FORMAT_R8_UINT:
1866    case VK_FORMAT_R8G8_UINT:
1867    case VK_FORMAT_R8G8B8_UINT:
1868    case VK_FORMAT_R8G8B8A8_UINT:
1869    case VK_FORMAT_R16_UINT:
1870    case VK_FORMAT_R16G16_UINT:
1871    case VK_FORMAT_R16G16B16_UINT:
1872    case VK_FORMAT_R16G16B16A16_UINT:
1873    case VK_FORMAT_R32_UINT:
1874    case VK_FORMAT_R32G32_UINT:
1875    case VK_FORMAT_R32G32B32_UINT:
1876    case VK_FORMAT_R32G32B32A32_UINT:
1877    case VK_FORMAT_R64_UINT:
1878    case VK_FORMAT_R64G64_UINT:
1879    case VK_FORMAT_R64G64B64_UINT:
1880    case VK_FORMAT_R64G64B64A64_UINT:
1881    case VK_FORMAT_B8G8R8_UINT:
1882    case VK_FORMAT_B8G8R8A8_UINT:
1883    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
1884    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
1885    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
1886        return FORMAT_TYPE_UINT;
1887    default:
1888        return FORMAT_TYPE_FLOAT;
1889    }
1890}
1891
1892/* characterizes a SPIR-V type appearing in an interface to a FF stage,
1893 * for comparison to a VkFormat's characterization above. */
1894static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
1895    auto insn = src->get_def(type);
1896    assert(insn != src->end());
1897
1898    switch (insn.opcode()) {
1899    case spv::OpTypeInt:
1900        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
1901    case spv::OpTypeFloat:
1902        return FORMAT_TYPE_FLOAT;
1903    case spv::OpTypeVector:
1904    case spv::OpTypeMatrix:
1905    case spv::OpTypeArray:
1906    case spv::OpTypeImage:
1907        /* element/component type is carried in word(2) for all of these */
1908        return get_fundamental_type(src, insn.word(2));
1909    case spv::OpTypePointer:
1910        return get_fundamental_type(src, insn.word(3));
1913
1914    default:
1915        return FORMAT_TYPE_UNDEFINED;
1916    }
1917}
1918
1919static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
1920    uint32_t bit_pos = u_ffs(stage);
1921    return bit_pos - 1;
1922}
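/* u_ffs returns the 1-based position of the lowest set bit, so e.g.
 * VK_SHADER_STAGE_FRAGMENT_BIT (0x10) maps to stage id 4 here. */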
1923
1924static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
1925    /* walk the binding descriptions, which describe the step rate and stride of each vertex buffer.
1926     * each binding should be specified only once.
1927     */
1928    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
1929    bool pass = true;
1930
1931    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
1932        auto desc = &vi->pVertexBindingDescriptions[i];
1933        auto &binding = bindings[desc->binding];
1934        if (binding) {
1935            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1936                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
1937                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
1938                pass = false;
1939            }
1940        } else {
1941            binding = desc;
1942        }
1943    }
1944
1945    return pass;
1946}
1947
1948static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
1949                                          shader_module const *vs, spirv_inst_iter entrypoint) {
1950    bool pass = true;
1951
1952    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);
1953
1954    /* Build index by location */
1955    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
1956    if (vi) {
1957        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
1958            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
1959            for (auto j = 0u; j < num_locations; j++) {
1960                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
1961            }
1962        }
1963    }
1964
1965    auto it_a = attribs.begin();
1966    auto it_b = inputs.begin();
1967    bool used = false;
1968
1969    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
1970        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
1971        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
1972        auto a_first = a_at_end ? 0 : it_a->first;
1973        auto b_first = b_at_end ? 0 : it_b->first.first;
1974        if (!a_at_end && (b_at_end || a_first < b_first)) {
1975            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1976                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
1977                        "Vertex attribute at location %d not consumed by VS", a_first)) {
1978                pass = false;
1979            }
1980            used = false;
1981            it_a++;
1982        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
1983            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, /*dev*/ 0,
1984                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d which is not provided",
1985                        b_first)) {
1986                pass = false;
1987            }
1988            it_b++;
1989        } else {
1990            unsigned attrib_type = get_format_type(it_a->second->format);
1991            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);
1992
1993            /* type checking */
1994            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
1995                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
1996                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
1997                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
1998                            string_VkFormat(it_a->second->format), a_first,
1999                            describe_type(vs, it_b->second.type_id).c_str())) {
2000                    pass = false;
2001                }
2002            }
2003
2004            /* OK! */
2005            used = true;
2006            it_b++;
2007        }
2008    }
2009
2010    return pass;
2011}
2012
2013static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
2014                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
2015                                                    uint32_t subpass_index) {
2016    std::map<uint32_t, VkFormat> color_attachments;
2017    auto subpass = rpci->pSubpasses[subpass_index];
2018    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
2019        uint32_t attachment = subpass.pColorAttachments[i].attachment;
2020        if (attachment == VK_ATTACHMENT_UNUSED)
2021            continue;
2022        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
2023            color_attachments[i] = rpci->pAttachments[attachment].format;
2024        }
2025    }
2026
2027    bool pass = true;
2028
2029    /* TODO: dual source blend index (spv::DecorationIndex, zero if not provided) */
2030
2031    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);
2032
2033    auto it_a = outputs.begin();
2034    auto it_b = color_attachments.begin();
2035
2036    /* Walk attachment list and outputs together */
2037
2038    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
2039        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
2040        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();
2041
2042        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
2043            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2044                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
2045                        "FS writes to output location %d with no matching attachment", it_a->first.first)) {
2046                pass = false;
2047            }
2048            it_a++;
2049        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
2050            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2051                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by FS", it_b->first)) {
2052                pass = false;
2053            }
2054            it_b++;
2055        } else {
2056            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
2057            unsigned att_type = get_format_type(it_b->second);
2058
2059            /* type checking */
2060            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
2061                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2062                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
2063                            "Attachment %d of type `%s` does not match FS output type of `%s`", it_b->first,
2064                            string_VkFormat(it_b->second),
2065                            describe_type(fs, it_a->second.type_id).c_str())) {
2066                    pass = false;
2067                }
2068            }
2069
2070            /* OK! */
2071            it_a++;
2072            it_b++;
2073        }
2074    }
2075
2076    return pass;
2077}
2078
2079/* For some analyses, we need to know about all ids referenced by the static call tree of a particular
2080 * entrypoint. This is important for identifying the set of shader resources actually used by an entrypoint,
2081 * for example.
2082 * Note: we only explore the parts of the module which might actually contain ids we care about for the above analyses.
2083 *  - NOT the shader input/output interfaces.
2084 *
2085 * TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
2086 * converting parts of this to be generated from the machine-readable spec instead.
2087 */
2088static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
2089    std::unordered_set<uint32_t> ids;
2090    std::unordered_set<uint32_t> worklist;
2091    worklist.insert(entrypoint.word(2));
2092
2093    while (!worklist.empty()) {
2094        auto id_iter = worklist.begin();
2095        auto id = *id_iter;
2096        worklist.erase(id_iter);
2097
2098        auto insn = src->get_def(id);
2099        if (insn == src->end()) {
2100            /* id is something we didn't collect in build_def_index. that's OK -- we'll stumble
2101             * across all kinds of things here that we may not care about. */
2102            continue;
2103        }
2104
2105        /* try to add to the output set */
2106        if (!ids.insert(id).second) {
2107            continue; /* if we already saw this id, we don't want to walk it again. */
2108        }
2109
2110        switch (insn.opcode()) {
2111        case spv::OpFunction:
2112            /* scan whole body of the function, enlisting anything interesting */
2113            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
2114                switch (insn.opcode()) {
2115                case spv::OpLoad:
2116                case spv::OpAtomicLoad:
2117                case spv::OpAtomicExchange:
2118                case spv::OpAtomicCompareExchange:
2119                case spv::OpAtomicCompareExchangeWeak:
2120                case spv::OpAtomicIIncrement:
2121                case spv::OpAtomicIDecrement:
2122                case spv::OpAtomicIAdd:
2123                case spv::OpAtomicISub:
2124                case spv::OpAtomicSMin:
2125                case spv::OpAtomicUMin:
2126                case spv::OpAtomicSMax:
2127                case spv::OpAtomicUMax:
2128                case spv::OpAtomicAnd:
2129                case spv::OpAtomicOr:
2130                case spv::OpAtomicXor:
2131                    worklist.insert(insn.word(3)); /* ptr */
2132                    break;
2133                case spv::OpStore:
2134                case spv::OpAtomicStore:
2135                    worklist.insert(insn.word(1)); /* ptr */
2136                    break;
2137                case spv::OpAccessChain:
2138                case spv::OpInBoundsAccessChain:
2139                    worklist.insert(insn.word(3)); /* base ptr */
2140                    break;
2141                case spv::OpSampledImage:
2142                case spv::OpImageSampleImplicitLod:
2143                case spv::OpImageSampleExplicitLod:
2144                case spv::OpImageSampleDrefImplicitLod:
2145                case spv::OpImageSampleDrefExplicitLod:
2146                case spv::OpImageSampleProjImplicitLod:
2147                case spv::OpImageSampleProjExplicitLod:
2148                case spv::OpImageSampleProjDrefImplicitLod:
2149                case spv::OpImageSampleProjDrefExplicitLod:
2150                case spv::OpImageFetch:
2151                case spv::OpImageGather:
2152                case spv::OpImageDrefGather:
2153                case spv::OpImageRead:
2154                case spv::OpImage:
2155                case spv::OpImageQueryFormat:
2156                case spv::OpImageQueryOrder:
2157                case spv::OpImageQuerySizeLod:
2158                case spv::OpImageQuerySize:
2159                case spv::OpImageQueryLod:
2160                case spv::OpImageQueryLevels:
2161                case spv::OpImageQuerySamples:
2162                case spv::OpImageSparseSampleImplicitLod:
2163                case spv::OpImageSparseSampleExplicitLod:
2164                case spv::OpImageSparseSampleDrefImplicitLod:
2165                case spv::OpImageSparseSampleDrefExplicitLod:
2166                case spv::OpImageSparseSampleProjImplicitLod:
2167                case spv::OpImageSparseSampleProjExplicitLod:
2168                case spv::OpImageSparseSampleProjDrefImplicitLod:
2169                case spv::OpImageSparseSampleProjDrefExplicitLod:
2170                case spv::OpImageSparseFetch:
2171                case spv::OpImageSparseGather:
2172                case spv::OpImageSparseDrefGather:
2173                case spv::OpImageTexelPointer:
2174                    worklist.insert(insn.word(3)); /* image or sampled image */
2175                    break;
2176                case spv::OpImageWrite:
2177                    worklist.insert(insn.word(1)); /* image -- different operand order from the cases above */
2178                    break;
2179                case spv::OpFunctionCall:
2180                    for (uint32_t i = 3; i < insn.len(); i++) {
2181                        worklist.insert(insn.word(i)); /* fn itself, and all args */
2182                    }
2183                    break;
2184
2185                case spv::OpExtInst:
2186                    for (uint32_t i = 5; i < insn.len(); i++) {
2187                        worklist.insert(insn.word(i)); /* operands to ext inst */
2188                    }
2189                    break;
2190                }
2191            }
2192            break;
2193        }
2194    }
2195
2196    return ids;
2197}
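/* Illustrative sketch of how these helpers combine, so that only resources
 * actually reachable from the entrypoint are checked:
 *     auto accessible_ids = mark_accessible_ids(module, entrypoint);
 *     auto slots = collect_interface_by_descriptor_slot(report_data, module,
 *                                                       accessible_ids);
 */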
2198
2199static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
2200                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
2201                                                          shader_module const *src, spirv_inst_iter type,
2202                                                          VkShaderStageFlagBits stage) {
2203    bool pass = true;
2204
2205    /* strip off ptrs etc */
2206    type = get_struct_type(src, type, false);
2207    assert(type != src->end());
2208
2209    /* validate directly off the offsets. this isn't quite correct for arrays
2210     * and matrices, but is a good first step. TODO: arrays, matrices, weird
2211     * sizes */
2212    for (auto insn : *src) {
2213        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
2214
2215            if (insn.word(3) == spv::DecorationOffset) {
2216                unsigned offset = insn.word(4);
2217                auto size = 4; /* bytes; TODO: calculate this based on the type */
2218
2219                bool found_range = false;
2220                for (auto const &range : *push_constant_ranges) {
2221                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
2222                        found_range = true;
2223
2224                        if ((range.stageFlags & stage) == 0) {
2225                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2226                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
2227                                        "Push constant range covering variable starting at "
2228                                        "offset %u not accessible from stage %s",
2229                                        offset, string_VkShaderStageFlagBits(stage))) {
2230                                pass = false;
2231                            }
2232                        }
2233
2234                        break;
2235                    }
2236                }
2237
2238                if (!found_range) {
2239                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2240                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
2241                                "Push constant range covering variable starting at "
2242                                "offset %u not declared in layout",
2243                                offset)) {
2244                        pass = false;
2245                    }
2246                }
2247            }
2248        }
2249    }
2250
2251    return pass;
2252}
2253
2254static bool validate_push_constant_usage(debug_report_data *report_data,
2255                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
2256                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
2257    bool pass = true;
2258
2259    for (auto id : accessible_ids) {
2260        auto def_insn = src->get_def(id);
2261        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
2262            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
2263                                                                  src->get_def(def_insn.word(1)), stage);
2264        }
2265    }
2266
2267    return pass;
2268}
2269
2270// For given pipelineLayout verify that the set_layout_node at slot.first
2271//  has the requested binding at slot.second and return ptr to that binding
2272static VkDescriptorSetLayoutBinding const * get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {
2273
2274    if (!pipelineLayout)
2275        return nullptr;
2276
2277    if (slot.first >= pipelineLayout->set_layouts.size())
2278        return nullptr;
2279
2280    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
2281}
2282
2283// Start of a block of code for managing/tracking the pipeline state that this layer cares about
2284
2285static uint64_t g_drawCount[NUM_DRAW_TYPES] = {0, 0, 0, 0};
2286
2287// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
2288//   Then we need to synchronize accesses based on cmd buffer so that if we're reading state on one cmd buffer, updates
2289//   to that same cmd buffer by a separate thread are not changing state from underneath us
2290// Track the last cmd buffer touched by this thread
2291
2292static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
2293    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
2294        if (pCB->drawCount[i])
2295            return true;
2296    }
2297    return false;
2298}
2299
2300// Check object status for selected flag state
2301static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
2302                            DRAW_STATE_ERROR error_code, const char *fail_msg) {
2303    if (!(pNode->status & status_mask)) {
2304        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
2305                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, error_code, "DS",
2306                       "CB object 0x%" PRIxLEAST64 ": %s", reinterpret_cast<const uint64_t &>(pNode->commandBuffer), fail_msg);
2307    }
2308    return false;
2309}
2310
2311// Retrieve pipeline node ptr for given pipeline object
2312static PIPELINE_NODE *getPipeline(layer_data const *my_data, VkPipeline pipeline) {
2313    auto it = my_data->pipelineMap.find(pipeline);
2314    if (it == my_data->pipelineMap.end()) {
2315        return nullptr;
2316    }
2317    return it->second;
2318}
2319
2320static RENDER_PASS_NODE *getRenderPass(layer_data const *my_data, VkRenderPass renderpass) {
2321    auto it = my_data->renderPassMap.find(renderpass);
2322    if (it == my_data->renderPassMap.end()) {
2323        return nullptr;
2324    }
2325    return it->second.get();
2326}
2327
2328static FRAMEBUFFER_NODE *getFramebuffer(const layer_data *my_data, VkFramebuffer framebuffer) {
2329    auto it = my_data->frameBufferMap.find(framebuffer);
2330    if (it == my_data->frameBufferMap.end()) {
2331        return nullptr;
2332    }
2333    return it->second.get();
2334}
2335
2336cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
2337    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
2338    if (it == my_data->descriptorSetLayoutMap.end()) {
2339        return nullptr;
2340    }
2341    return it->second;
2342}
2343
2344static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
2345    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
2346    if (it == my_data->pipelineLayoutMap.end()) {
2347        return nullptr;
2348    }
2349    return &it->second;
2350}
2351
2352// Return true if for a given PSO, the given state enum is dynamic, else return false
2353static bool isDynamic(const PIPELINE_NODE *pPipeline, const VkDynamicState state) {
2354    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
2355        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
2356            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
2357                return true;
2358        }
2359    }
2360    return false;
2361}
2362
2363// Validate state stored as flags at time of draw call
2364static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe, bool indexedDraw) {
2365    bool result = false;
2366    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
2367        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
2368         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
2369        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2370                                  DRAWSTATE_LINE_WIDTH_NOT_BOUND, "Dynamic line width state not set for this command buffer");
2371    }
2372    if (pPipe->graphicsPipelineCI.pRasterizationState &&
2373        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
2374        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2375                                  DRAWSTATE_DEPTH_BIAS_NOT_BOUND, "Dynamic depth bias state not set for this command buffer");
2376    }
2377    if (pPipe->blendConstantsEnabled) {
2378        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2379                                  DRAWSTATE_BLEND_NOT_BOUND, "Dynamic blend constants state not set for this command buffer");
2380    }
2381    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2382        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
2383        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2384                                  DRAWSTATE_DEPTH_BOUNDS_NOT_BOUND, "Dynamic depth bounds state not set for this command buffer");
2385    }
2386    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
2387        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
2388        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2389                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil read mask state not set for this command buffer");
2390        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2391                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil write mask state not set for this command buffer");
2392        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2393                                  DRAWSTATE_STENCIL_NOT_BOUND, "Dynamic stencil reference state not set for this command buffer");
2394    }
2395    if (indexedDraw) {
2396        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
2397                                  DRAWSTATE_INDEX_BUFFER_NOT_BOUND,
2398                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted");
2399    }
2400    return result;
2401}
2402
2403// Verify attachment reference compatibility according to spec
2404//  If one array is larger, treat missing elements of the shorter array as VK_ATTACHMENT_UNUSED; the other array must match this
2405//  If both AttachmentReference arrays have the requested index, check their corresponding AttachmentDescriptions
2406//   to make sure that format and sample counts match.
2407//  If not, they are not compatible.
2408static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
2409                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
2410                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
2411                                             const VkAttachmentDescription *pSecondaryAttachments) {
2412    // Check potential NULL cases first to avoid nullptr issues later
2413    if (pPrimary == nullptr) {
2414        if (pSecondary == nullptr) {
2415            return true;
2416        }
2417        return false;
2418    } else if (pSecondary == nullptr) {
2419        return false;
2420    }
2421    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
2422        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
2423            return true;
2424    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
2425        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
2426            return true;
2427    } else { // Format and sample count must match
2428        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2429            return true;
2430        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
2431            return false;
2432        }
2433        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
2434             pSecondaryAttachments[pSecondary[index].attachment].format) &&
2435            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
2436             pSecondaryAttachments[pSecondary[index].attachment].samples))
2437            return true;
2438    }
2439    // Format and sample counts didn't match
2440    return false;
2441}
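
// Example (illustrative only): given pPrimary = {{0}} over pPrimaryAttachments[0] =
// {VK_FORMAT_R8G8B8A8_UNORM, 4 samples} and pSecondary = {{0}} over pSecondaryAttachments[0] =
// {VK_FORMAT_R8G8B8A8_UNORM, 1 sample}, index 0 is incompatible because the sample counts
// differ even though the formats match. A reference missing from the shorter array is treated
// as VK_ATTACHMENT_UNUSED, so it is only compatible with VK_ATTACHMENT_UNUSED on the other side.
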
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For the given primary and secondary RenderPassCreateInfo, verify that the two render passes are compatible
static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            // Note: pass the input attachment counts here, not the color counts used above
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *pSet,
                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                            string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return pSet->IsCompatible(layout_node, &errorMsg);
}

// Validate that data for each specialization entry is fully contained within the buffer.
static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
    bool pass = true;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                            /*dev*/ 0, __LINE__, SHADER_CHECKER_BAD_SPECIALIZATION, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided)",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize)) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}
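
// Illustrative sketch of the data this check guards (values are hypothetical):
//     int32_t data = 42;                                        // 4 bytes of specialization data
//     VkSpecializationMapEntry entry = {7, 0, sizeof(int32_t)}; // constantID, offset, size
//     VkSpecializationInfo spec = {1, &entry, sizeof(data), &data};
// Here offset + size == dataSize, so the entry passes. An entry such as {7, 2, 4} would
// reference bytes 2..5 of a 4-byte buffer and trigger SHADER_CHECKER_BAD_SPECIALIZATION.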

static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    /* Strip off any array or ptrs. Where we remove array levels, adjust the
     * descriptor count for each dimension. */
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        } else {
            type = module->get_def(type.word(3));
        }
    }

    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        /* Invalid */
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
            descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeSampledImage:
        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            /* Slight relaxation for some GLSL historical madness: samplerBuffer
             * doesn't really have a sampler, and a texel buffer descriptor
             * doesn't really provide one. Allow this slight mismatch.
             */
            auto image_type = module->get_def(type.word(2));
            auto dim = image_type.word(3);
            auto sampled = image_type.word(7);
            return dim == spv::DimBuffer && sampled == 1;
        }
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        /* Many descriptor types can back an image type; which one depends on the
         * dimension and on whether the image will be used with a sampler. SPIR-V
         * for Vulkan requires Sampled to be 1 or 2 -- leaving the decision to
         * runtime is not allowed.
         */
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
                descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    /* We shouldn't really see any other junk types -- but if we do, they're
     * a mismatch.
     */
    default:
        return false; /* Mismatch */
    }
}
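
// For reference, a non-exhaustive sketch of the mapping above in terms of GLSL declarations:
//     layout(binding=N) uniform UBO { ... };     -> UNIFORM_BUFFER[_DYNAMIC]  (Block struct)
//     layout(binding=N) buffer SSBO { ... };     -> STORAGE_BUFFER[_DYNAMIC]  (BufferBlock struct)
//     layout(binding=N) uniform sampler2D s;     -> COMBINED_IMAGE_SAMPLER    (OpTypeSampledImage)
//     layout(binding=N) uniform texture2D t;     -> SAMPLED_IMAGE             (OpTypeImage, Sampled=1)
//     layout(binding=N) uniform image2D i;       -> STORAGE_IMAGE             (OpTypeImage, Sampled=2)
//     layout(binding=N) uniform samplerBuffer b; -> UNIFORM_TEXEL_BUFFER      (DimBuffer relaxation)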

static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but it is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
    bool pass = true;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, which is not supported in Vulkan.",
                            insn.word(1)))
                    pass = false;
                break;
            }
        }
    }

    return pass;
}

static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
    auto type = module->get_def(type_id);

    while (true) {
        switch (type.opcode()) {
        case spv::OpTypeArray:
        case spv::OpTypeSampledImage:
            type = module->get_def(type.word(2));
            break;
        case spv::OpTypePointer:
            type = module->get_def(type.word(3));
            break;
        case spv::OpTypeImage: {
            auto dim = type.word(3);
            auto arrayed = type.word(5);
            auto msaa = type.word(6);

            switch (dim) {
            case spv::Dim1D:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
            case spv::Dim2D:
                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
                    (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
            case spv::Dim3D:
                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
            case spv::DimCube:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
            case spv::DimSubpassData:
                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
            default:  // buffer, etc.
                return 0;
            }
        }
        default:
            return 0;
        }
    }
}
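
// Example (illustrative): a shader variable of type sampler2DArray lowers to
// OpTypeSampledImage -> OpTypeImage with Dim=2D, Arrayed=1, MS=0, so this returns
// DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY; the bound view must
// then be VK_IMAGE_VIEW_TYPE_2D_ARRAY over a single-sampled image.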

static bool validate_pipeline_shader_stage(debug_report_data *report_data,
                                           VkPipelineShaderStageCreateInfo const *pStage,
                                           PIPELINE_NODE *pipeline,
                                           shader_module **out_module,
                                           spirv_inst_iter *out_entrypoint,
                                           VkPhysicalDeviceFeatures const *enabledFeatures,
                                           std::unordered_map<VkShaderModule,
                                           std::unique_ptr<shader_module>> const &shaderModuleMap) {
    bool pass = true;
    auto module_it = shaderModuleMap.find(pStage->module);
    auto module = *out_module = module_it->second.get();

    /* find the entrypoint */
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_MISSING_ENTRYPOINT, "SC",
                    "No entrypoint found named `%s` for stage %s", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage))) {
            return false;   // no point continuing beyond here, any analysis is just going to be garbage.
        }
    }

    /* validate shader capabilities against enabled device features */
    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);

    /* mark accessible ids */
    auto accessible_ids = mark_accessible_ids(module, entrypoint);

    /* validate descriptor set layout against what the entrypoint actually uses */
    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);

    auto pipelineLayout = pipeline->pipeline_layout;

    pass &= validate_specialization_offsets(report_data, pStage);
    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);

    /* validate descriptor use */
    for (auto use : descriptor_uses) {
        // While validating shaders, capture which slots are used by the pipeline
        auto & reqs = pipeline->active_slots[use.first.first][use.first.second];
        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));

        /* verify given pipelineLayout has requested setLayout with requested binding */
        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        /*dev*/ 0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but descriptor not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
                                          /*out*/ required_descriptor_count)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
                                                                       "%u.%u (used as type `%s`) but "
                                                                       "bound descriptor is of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    /* validate use of input attachments against subpass structure */
    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);

        auto rpci = pipeline->render_pass_ci.ptr();
        auto subpass = pipeline->graphicsPipelineCI.subpass;

        for (auto use : input_attachment_uses) {
            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;

            if (index == VK_ATTACHMENT_UNUSED) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
                            "Shader consumes input attachment index %u but it is not provided in the subpass",
                            use.first)) {
                    pass = false;
                }
            } else if (get_format_type(rpci->pAttachments[index].format) !=
                    get_fundamental_type(module, use.second.type_id)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
                            use.first, string_VkFormat(rpci->pAttachments[index].format),
                            describe_type(module, use.second.type_id).c_str())) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}

// Validate the shaders used by the given pipeline and store the active_slots
//  that are actually used by the pipeline into pPipeline->active_slots
static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_NODE *pPipeline,
                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
                                               &shaders[stage_id], &entrypoints[stage_id],
                                               enabledFeatures, shaderModuleMap);
    }

    // If the shader stages are no good individually, cross-stage validation is pointless.
    if (!pass)
        return false;

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(report_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass &= validate_interface_between_stages(report_data,
                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    if (shaders[fragment_stage]) {
        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
    }

    return pass;
}

static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_NODE *pPipeline, VkPhysicalDeviceFeatures const *enabledFeatures,
                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const & shaderModuleMap) {
    auto pCreateInfo = pPipeline->computePipelineCI.ptr();

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
}
// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}
// For the given command buffer, verify and update the state for activeSetBindingsPairs
//  This includes:
//  1. Verifying that any dynamic descriptor in that set has a valid dynamic offset bound.
//     To be valid, the dynamic offset combined with the offset and range from its
//     descriptor update must not overflow the size of its buffer being updated
//  2. Grow updateImages for given pCB to include any bound STORAGE_IMAGE descriptor images
//  3. Grow updateBuffers for pCB to include buffers from STORAGE*_BUFFER descriptor buffers
static bool validate_and_update_drawtime_descriptor_state(
    layer_data *dev_data, GLOBAL_CB_NODE *pCB,
    const vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
        &activeSetBindingsPairs,
    const char *function) {
    bool result = false;
    for (auto set_bindings_pair : activeSetBindingsPairs) {
        cvdescriptorset::DescriptorSet *set_node = std::get<0>(set_bindings_pair);
        std::string err_str;
        if (!set_node->ValidateDrawState(std::get<1>(set_bindings_pair), *std::get<2>(set_bindings_pair),
                                         &err_str)) {
            // Report error here
            auto set = set_node->GetSet();
            result |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                              reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                              "DS 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
                              reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
        }
        set_node->GetStorageUpdates(std::get<1>(set_bindings_pair), &pCB->updateBuffers, &pCB->updateImages);
    }
    return result;
}
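
// Illustrative sketch of check #1 above (hypothetical values): a UNIFORM_BUFFER_DYNAMIC
// descriptor written with offset=0 and range=256 against a 512-byte buffer tolerates any
// dynamic offset <= 256. Binding the set with vkCmdBindDescriptorSets(..., 1, &dynamicOffset)
// where dynamicOffset = 384 would make offset + range + dynamicOffset exceed the buffer size
// and fail ValidateDrawState() at draw time.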

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_NODE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

static void list_bits(std::ostream& s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
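
// Example: list_bits(s, 0x0000000B) appends "0,1,3"; set bits are emitted in ascending
// order, separated by commas, with no trailing separator.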

// Validate draw-time state related to the PSO
static bool validatePipelineDrawtimeState(layer_data const *my_data,
                                          LAST_BOUND_STATE const &state,
                                          const GLOBAL_CB_NODE *pCB,
                                          PIPELINE_NODE const *pPipeline) {
    bool skip_call = false;

    // Verify Vtx binding
    if (pPipeline->vertexBindingDescriptions.size() > 0) {
        for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
            auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
            if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
                (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
                skip_call |= log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                    "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
                    "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
                    "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
                    (uint64_t)state.pipeline_node->pipeline, vertex_binding, i, vertex_binding);
            }
        }
    } else {
        if (!pCB->currentDrawData.buffers.empty()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
                                 "Vertex buffers are bound to command buffer (0x%" PRIxLEAST64
                                 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
                                 (uint64_t)pCB->commandBuffer, (uint64_t)state.pipeline_node->pipeline);
        }
    }
    // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
    // Skip check if rasterization is disabled or there is no viewport.
    if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
         (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
        pPipeline->graphicsPipelineCI.pViewportState) {
        bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
        bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);

        if (dynViewport) {
            auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
            auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
            if (missingViewportMask) {
                std::stringstream ss;
                ss << "Dynamic viewport(s) ";
                list_bits(ss, missingViewportMask);
                ss << " are used by PSO, but were not provided via calls to vkCmdSetViewport().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }

        if (dynScissor) {
            auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
            auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
            if (missingScissorMask) {
                std::stringstream ss;
                ss << "Dynamic scissor(s) ";
                list_bits(ss, missingScissorMask);
                ss << " are used by PSO, but were not provided via calls to vkCmdSetScissor().";
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
                                     "%s", ss.str().c_str());
            }
        }
    }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;

            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
                skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "Render pass subpass %u: blend state attachment count %u does not match subpass color "
                                "attachment count %u in Pipeline (0x%" PRIxLEAST64 ")!  These must be the same at draw-time.",
                                pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
            }

            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip_call |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
                                ") uses %u samples but the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
                                reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
                                reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            // Note: report the pipeline handle here, not the address of the PIPELINE_NODE pointer
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline),
                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                        pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip_call;
}

// Validate overall state at the time of a draw call
static bool validate_and_update_draw_state(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexedDraw,
                                           const VkPipelineBindPoint bindPoint, const char *function) {
    bool result = false;
    auto const &state = cb_node->lastBound[bindPoint];
    PIPELINE_NODE *pPipe = state.pipeline_node;
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return unconditionally, as every check below would dereference the null pipeline
        return result;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexedDraw);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        // Need a vector (vs. std::set) of active Sets for dynamicOffset validation in case same set bound w/ different offsets
        vector<std::tuple<cvdescriptorset::DescriptorSet *, std::map<uint32_t, descriptor_req>, std::vector<uint32_t> const *>>
            activeSetBindingsPairs;
        for (auto & setBindingPair : pPipe->active_slots) {
            uint32_t setIndex = setBindingPair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.", (uint64_t)pPipe->pipeline,
                                  setIndex);
            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            reinterpret_cast<uint64_t &>(setHandle), setIndex, reinterpret_cast<uint64_t &>(pipeline_layout.layout),
                            errorString.c_str());
            } else { // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *pSet = state.boundDescriptorSets[setIndex];
                // Gather active bindings
                std::unordered_set<uint32_t> bindings;
                for (auto binding : setBindingPair.second) {
                    bindings.insert(binding.first);
                }
                // Bind this set and its active descriptor resources to the command buffer
                pSet->BindCommandBuffer(cb_node, bindings);
                // Save vector of all active sets to verify dynamicOffsets below
                activeSetBindingsPairs.push_back(std::make_tuple(pSet, setBindingPair.second, &state.dynamicOffsets[setIndex]));
                // Make sure set has been updated if it has no immutable samplers
                //  If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!pSet->IsUpdated()) {
                    for (auto binding : bindings) {
                        if (!pSet->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pSet->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "DS 0x%" PRIxLEAST64 " bound but it was never updated. It is now being used to draw so "
                                "this will result in undefined behavior.",
                                (uint64_t)pSet->GetSet());
                        }
                    }
                }
            }
        }
        // For given active slots, verify any dynamic descriptors and record updated images & buffers
        result |= validate_and_update_drawtime_descriptor_state(my_data, cb_node, activeSetBindingsPairs, function);
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bindPoint)
        result |= validatePipelineDrawtimeState(my_data, state, cb_node, pPipe);

    return result;
}

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                                            "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                                          "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}
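
// Example (hypothetical limits): on a device with wideLines enabled and
// lineWidthRange = [0.5, 8.0], vkCmdSetLineWidth(cmd, 10.0f) fails the range check above;
// without wideLines, any value other than 1.0f is rejected regardless of the range.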
3344
3345// Verify that create state for a pipeline is valid
3346static bool verifyPipelineCreateState(layer_data *my_data, const VkDevice device, std::vector<PIPELINE_NODE *> pPipelines,
3347                                      int pipelineIndex) {
3348    bool skip_call = false;
3349
3350    PIPELINE_NODE *pPipeline = pPipelines[pipelineIndex];
3351
3352    // If create derivative bit is set, check that we've specified a base
3353    // pipeline correctly, and that the base pipeline was created to allow
3354    // derivatives.
3355    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
3356        PIPELINE_NODE *pBasePipeline = nullptr;
3357        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
3358              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
3359            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3360                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3361                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
3362        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
3363            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
3364                skip_call |=
3365                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3366                            DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3367                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline.");
3368            } else {
3369                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
3370            }
3371        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
3372            pBasePipeline = getPipeline(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
3373        }
3374
3375        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
3376            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3377                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3378                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
3379        }
3380    }
3381
3382    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
3383        if (!my_data->enabled_features.independentBlend) {
3384            if (pPipeline->attachments.size() > 1) {
3385                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
3386                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
3387                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
3388                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
3389                    // only attachment state, so memcmp is best suited for the comparison
3390                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
3391                               sizeof(pAttachments[0]))) {
3392                        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3393                                             __LINE__, DRAWSTATE_INDEPENDENT_BLEND, "DS",
3394                                             "Invalid Pipeline CreateInfo: If the independentBlend feature is not "
3395                                             "enabled, all elements of pAttachments must be identical");
3396                        break;
3397                    }
3398                }
3399            }
3400        }
3401        if (!my_data->enabled_features.logicOp &&
3402            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
3403            skip_call |=
3404                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3405                        DRAWSTATE_DISABLED_LOGIC_OP, "DS",
3406                        "Invalid Pipeline CreateInfo: If the logicOp feature is not enabled, logicOpEnable must be VK_FALSE");
3407        }
3408    }
3409
3410    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
3411    // produces nonsense errors that confuse users. Other layers should already
3412    // emit errors for renderpass being invalid.
3413    auto renderPass = getRenderPass(my_data, pPipeline->graphicsPipelineCI.renderPass);
3414    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
3415        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3416                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
3417                                                                            "is out of range for this renderpass (0..%u)",
3418                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1);
3419    }
3420
3421    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
3422                                                    my_data->shaderModuleMap)) {
3423        skip_call = true;
3424    }
3425    // Each shader's stage must be unique
3426    if (pPipeline->duplicate_shaders) {
3427        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
3428            if (pPipeline->duplicate_shaders & stage) {
3429                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
3430                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3431                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
3432                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
3433            }
3434        }
3435    }
3436    // A vertex shader is always required in a graphics pipeline
3437    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
3438        skip_call |=
3439            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3440                    DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required");
3441    }
3442    // Either both or neither TC/TE shaders should be defined
3443    if (((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) == 0) !=
3444        ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) == 0)) {
3445        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3446                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3447                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair");
3448    }
3449    // Compute shaders should be specified independent of Gfx shaders
3450    if ((pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) &&
3451        (pPipeline->active_shaders &
3452         (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT |
3453          VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_FRAGMENT_BIT))) {
3454        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3455                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3456                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline");
3457    }
3458    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
3459    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
3460    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
3461        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
3462         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
3463        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3464                             DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3465                                                                            "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
3466                                                                            "topology for tessellation pipelines");
3467    }
3468    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
3469        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
3470        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
3471            skip_call |=
3472                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3473                        DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS", "Invalid Pipeline CreateInfo State: "
3474                                                                       "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3475                                                                       "topology is only valid for tessellation pipelines");
3476        }
3477        if (!pPipeline->graphicsPipelineCI.pTessellationState) {
3478            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3479                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3480                                 "Invalid Pipeline CreateInfo State: "
3481                                 "pTessellationState is NULL when VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
3482                                 "topology used. pTessellationState must not be NULL in this case.");
3483        } else if (!pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints ||
3484                   (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
3485                    my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize)) {
3486            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3487                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3488                                 "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST topology used with "
3489                                 "patchControlPoints value %u; patchControlPoints must be >0 and <= maxTessellationPatchSize.",
3490                                 pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints);
3491        }
3492    }
3493    // If a rasterization state is provided, make sure that the line width conforms to the HW.
3494    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
3495        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
3496            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, reinterpret_cast<uint64_t &>(pPipeline),
3497                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
3498        }
3499    }
3500    // Viewport state must be included if rasterization is enabled.
3501    // If the viewport state is included, the viewport and scissor counts should always match.
3502    // NOTE : Even if these are flagged as dynamic, counts need to be set correctly for shader compiler
3503    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
3504        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
3505        if (!pPipeline->graphicsPipelineCI.pViewportState) {
3506            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3507                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS", "Gfx Pipeline pViewportState is null. Even if "
3508                                                                            "viewport and scissors are dynamic, the PSO must "
3509                                                                            "include viewportCount and scissorCount in pViewportState.");
3510        } else if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount !=
3511                   pPipeline->graphicsPipelineCI.pViewportState->viewportCount) {
3512            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3513                                 DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3514                                 "Gfx Pipeline viewport count (%u) must match scissor count (%u).",
3515                                 pPipeline->graphicsPipelineCI.pViewportState->viewportCount,
3516                                 pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3517        } else {
3518            // If viewport or scissor are not dynamic, then verify that data is appropriate for count
3519            bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
3520            bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
3521            if (!dynViewport) {
3522                if (pPipeline->graphicsPipelineCI.pViewportState->viewportCount &&
3523                    !pPipeline->graphicsPipelineCI.pViewportState->pViewports) {
3524                    skip_call |=
3525                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3526                                DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3527                                "Gfx Pipeline viewportCount is %u, but pViewports is NULL. For non-zero viewportCount, you "
3528                                "must either include pViewports data, or include viewport in pDynamicState and set it with "
3529                                "vkCmdSetViewport().",
3530                                pPipeline->graphicsPipelineCI.pViewportState->viewportCount);
3531                }
3532            }
3533            if (!dynScissor) {
3534                if (pPipeline->graphicsPipelineCI.pViewportState->scissorCount &&
3535                    !pPipeline->graphicsPipelineCI.pViewportState->pScissors) {
3536                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
3537                                         __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
3538                                         "Gfx Pipeline scissorCount is %u, but pScissors is NULL. For non-zero scissorCount, you "
3539                                         "must either include pScissors data, or include scissor in pDynamicState and set it with "
3540                                         "vkCmdSetScissor().",
3541                                         pPipeline->graphicsPipelineCI.pViewportState->scissorCount);
3542                }
3543            }
3544        }
3545
3546        // If rasterization is not disabled, and subpass uses a depth/stencil
3547        // attachment, pDepthStencilState must be a pointer to a valid structure
3548        auto subpass_desc = (renderPass && pPipeline->graphicsPipelineCI.subpass < renderPass->createInfo.subpassCount) ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
3549        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
3550            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
3551            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
3552                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
3553                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
3554                                     "Invalid Pipeline CreateInfo State: "
3555                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
3556                                     "depth/stencil attachment");
3557            }
3558        }
3559    }
3560    return skip_call;
3561}
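
// Illustrative example (not part of this layer): a derivative-pipeline setup that
// satisfies the checks above -- exactly one of basePipelineHandle/basePipelineIndex
// is specified, the base occurs earlier in the array, and the base allows
// derivatives. Other required members of the create infos are omitted.
//
//     VkGraphicsPipelineCreateInfo ci[2] = {};
//     ci[0].flags = VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT;
//     ci[1].flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     ci[1].basePipelineIndex = 0;               // earlier element of ci[]
//     ci[1].basePipelineHandle = VK_NULL_HANDLE; // must not also set the handle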
3562
3563// Free the Pipeline nodes
3564static void deletePipelines(layer_data *my_data) {
3565    if (my_data->pipelineMap.empty())
3566        return;
3567    for (auto &pipe_map_pair : my_data->pipelineMap) {
3568        delete pipe_map_pair.second;
3569    }
3570    my_data->pipelineMap.clear();
3571}
3572
3573// Block of code at start here specifically for managing/tracking DSs
3574
3575// Return Pool node ptr for specified pool or else NULL
3576DESCRIPTOR_POOL_NODE *getPoolNode(const layer_data *dev_data, const VkDescriptorPool pool) {
3577    auto pool_it = dev_data->descriptorPoolMap.find(pool);
3578    if (pool_it == dev_data->descriptorPoolMap.end()) {
3579        return NULL;
3580    }
3581    return pool_it->second;
3582}
3583
3584// Return false if update struct is of valid type, otherwise flag error and return code from callback
3585static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3586    switch (pUpdateStruct->sType) {
3587    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3588    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3589        return false;
3590    default:
3591        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3592                       DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3593                       "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
3594                       string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3595    }
3596}
3597
3598// Return the descriptor count for the given update struct (0 if the struct type is unknown)
3599static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
3600    switch (pUpdateStruct->sType) {
3601    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3602        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
3603    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3604        // TODO : Need to understand this case better and make sure code is correct
3605        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
3606    default:
3607        return 0;
3608    }
3609}
3610
3611// For given layout and update, return the first overall index of the layout that is updated
3612static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3613                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3614    return binding_start_index + arrayIndex;
3615}
3616// For given layout and update, return the last overall index of the layout that is updated
3617static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
3618                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
3619    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
3620    return binding_start_index + arrayIndex + count - 1;
3621}
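
// Worked example for the two helpers above: if a binding's descriptors begin at
// overall layout index 4 (binding_start_index == 4) and an update writes
// descriptorCount == 3 starting at arrayIndex == 2, then the update covers
// overall indices [4 + 2, 4 + 2 + 3 - 1] == [6, 8].
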
3622// Verify that the descriptor type in the update struct matches what's expected by the layout
3623static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
3624                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
3625    // First get actual type of update
3626    bool skip_call = false;
3627    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
3628    switch (pUpdateStruct->sType) {
3629    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3630        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
3631        break;
3632    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
3633        /* no need to validate */
3634        return false;
3636    default:
3637        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3638                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3639                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptorSets() struct tree",
3640                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3641    }
3642    if (!skip_call) {
3643        if (layout_type != actualType) {
3644            skip_call |= log_msg(
3645                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3646                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
3647                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
3648                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
3649        }
3650    }
3651    return skip_call;
3652}
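
// Illustrative example (not part of this layer): a write update that would trip
// the check above, assuming the overlapping layout binding was declared as
// VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
//
//     VkWriteDescriptorSet write = {};
//     write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; // != layout type
//     write.descriptorCount = 1;
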
3653// TODO : Consolidate the FindLayout overloads below
3654bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
3655    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
3656    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3657        return false;
3658    }
3659    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3660    imgpair.subresource.aspectMask = aspectMask;
3661    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3662    if (imgsubIt == pCB->imageLayoutMap.end()) {
3663        return false;
3664    }
3665    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
3666        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3667                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3668                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3669                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout), string_VkImageLayout(imgsubIt->second.layout));
3670    }
3671    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
3672        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3673                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3674                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
3675                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout), string_VkImageLayout(imgsubIt->second.initialLayout));
3676    }
3677    node = imgsubIt->second;
3678    return true;
3679}
3680
3681bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
3682    if (!(imgpair.subresource.aspectMask & aspectMask)) {
3683        return false;
3684    }
3685    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
3686    imgpair.subresource.aspectMask = aspectMask;
3687    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3688    if (imgsubIt == my_data->imageLayoutMap.end()) {
3689        return false;
3690    }
3691    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
3692        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
3693                reinterpret_cast<uint64_t&>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
3694                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
3695                reinterpret_cast<uint64_t&>(imgpair.image), oldAspectMask, string_VkImageLayout(layout), string_VkImageLayout(imgsubIt->second.layout));
3696    }
3697    layout = imgsubIt->second.layout;
3698    return true;
3699}
3700
3701// find layout(s) on the cmd buf level
3702bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3703    ImageSubresourcePair imgpair = {image, true, range};
3704    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
3705    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
3706    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
3707    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
3708    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
3709    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3710        imgpair = {image, false, VkImageSubresource()};
3711        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
3712        if (imgsubIt == pCB->imageLayoutMap.end())
3713            return false;
3714        node = imgsubIt->second;
3715    }
3716    return true;
3717}
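
// Illustrative usage of the command-buffer-level lookup above ('image' is a
// hypothetical VkImage):
//
//     IMAGE_CMD_BUF_LAYOUT_NODE node;
//     VkImageSubresource sub = {VK_IMAGE_ASPECT_DEPTH_BIT, 0 /*mip*/, 0 /*layer*/};
//     if (FindLayout(pCB, image, sub, node)) {
//         // node.initialLayout is the first layout this CB saw for the
//         // subresource; node.layout is the most recently recorded one.
//     }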
3718
3719// find layout(s) on the global level
3720bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
3721    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
3722    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3723    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3724    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3725    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3726    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
3727        imgpair = {imgpair.image, false, VkImageSubresource()};
3728        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
3729        if (imgsubIt == my_data->imageLayoutMap.end())
3730            return false;
3731        layout = imgsubIt->second.layout;
3732    }
3733    return true;
3734}
3735
3736bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
3737    ImageSubresourcePair imgpair = {image, true, range};
3738    return FindLayout(my_data, imgpair, layout);
3739}
3740
3741bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
3742    auto sub_data = my_data->imageSubresourceMap.find(image);
3743    if (sub_data == my_data->imageSubresourceMap.end())
3744        return false;
3745    auto img_node = getImageNode(my_data, image);
3746    if (!img_node)
3747        return false;
3748    bool ignoreGlobal = false;
3749    // TODO: Make this robust for >1 aspect mask. For now it simply ignores
3750    // potential errors in this case.
3751    if (sub_data->second.size() >= (img_node->createInfo.arrayLayers * img_node->createInfo.mipLevels + 1)) {
3752        ignoreGlobal = true;
3753    }
3754    for (auto imgsubpair : sub_data->second) {
3755        if (ignoreGlobal && !imgsubpair.hasSubresource)
3756            continue;
3757        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
3758        if (img_data != my_data->imageLayoutMap.end()) {
3759            layouts.push_back(img_data->second.layout);
3760        }
3761    }
3762    return true;
3763}
3764
3765// Set the layout on the global level
3766void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3767    VkImage &image = imgpair.image;
3768    // TODO (mlentine): Maybe set format if new? Not used atm.
3769    my_data->imageLayoutMap[imgpair].layout = layout;
3770    // TODO (mlentine): Maybe make vector a set?
3771    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
3772    if (subresource == my_data->imageSubresourceMap[image].end()) {
3773        my_data->imageSubresourceMap[image].push_back(imgpair);
3774    }
3775}
3776
3777// Set the layout on the cmdbuf level
3778void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
3779    pCB->imageLayoutMap[imgpair] = node;
3780    // TODO (mlentine): Maybe make vector a set?
3781    auto subresource =
3782        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
3783    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
3784        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
3785    }
3786}
3787
3788void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
3789    // TODO (mlentine): Maybe make vector a set?
3790    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
3791        pCB->imageSubresourceMap[imgpair.image].end()) {
3792        pCB->imageLayoutMap[imgpair].layout = layout;
3793    } else {
3794        // TODO (mlentine): Could be expensive and might need to be removed.
3795        assert(imgpair.hasSubresource);
3796        IMAGE_CMD_BUF_LAYOUT_NODE node;
3797        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
3798            node.initialLayout = layout;
3799        }
3800        SetLayout(pCB, imgpair, {node.initialLayout, layout});
3801    }
3802}
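
// Note on the else-branch above: the first time a subresource is touched in this
// command buffer, FindLayout() fails, so initialLayout is seeded with the incoming
// layout; afterwards the if-branch runs and only the current layout advances.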
3803
3804template <class OBJECT, class LAYOUT>
3805void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
3806    if (imgpair.subresource.aspectMask & aspectMask) {
3807        imgpair.subresource.aspectMask = aspectMask;
3808        SetLayout(pObject, imgpair, layout);
3809    }
3810}
3811
3812template <class OBJECT, class LAYOUT>
3813void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
3814    ImageSubresourcePair imgpair = {image, true, range};
3815    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
3816    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
3817    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
3818    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
3819}
3820
3821template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
3822    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
3823    SetLayout(pObject, imgpair, layout); // image is already captured inside imgpair
3824}
3825
3826void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
3827    auto view_state = getImageViewState(dev_data, imageView);
3828    assert(view_state);
3829    auto image = view_state->create_info.image;
3830    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
3831    // TODO: Do not iterate over every possibility - consolidate where possible
3832    for (uint32_t j = 0; j < subRange.levelCount; j++) {
3833        uint32_t level = subRange.baseMipLevel + j;
3834        for (uint32_t k = 0; k < subRange.layerCount; k++) {
3835            uint32_t layer = subRange.baseArrayLayer + k;
3836            VkImageSubresource sub = {subRange.aspectMask, level, layer};
3837            // TODO: If ImageView was created with depth or stencil, transition both layouts as
3838            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
3839            // is OK for descriptor set layout validation
3840            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
3841                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
3842                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
3843                }
3844            }
3845            SetLayout(pCB, image, sub, layout);
3846        }
3847    }
3848}
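
// Illustrative usage (not part of this layer): record the layout an attachment
// view reaches when a render pass begins; 'attachment_view' is hypothetical.
//
//     SetLayout(dev_data, pCB, attachment_view, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);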
3849
3850// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
3851// func_str is the name of the calling function
3852// Return false if no errors occur
3853// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
3854static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const std::string &func_str) {
3855    if (dev_data->instance_state->disabled.idle_descriptor_set)
3856        return false;
3857    bool skip_call = false;
3858    auto set_node = dev_data->setMap.find(set);
3859    if (set_node == dev_data->setMap.end()) {
3860        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3861                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
3862                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.", func_str.c_str(),
3863                             (uint64_t)(set));
3864    } else {
3865        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
3866        if (set_node->second->in_use.load()) {
3867            skip_call |=
3868                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
3869                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
3870                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
3871                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
3872        }
3873    }
3874    return skip_call;
3875}
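
// Illustrative scenario for the in-use check above (names are hypothetical):
//
//     vkQueueSubmit(queue, 1, &submit_info, fence); // a submitted CB binds 'set'
//     vkFreeDescriptorSets(device, pool, 1, &set);  // flagged until that CB completes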
3876
3877// Remove set from setMap and delete the set
3878static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
3879    dev_data->setMap.erase(descriptor_set->GetSet());
3880    delete descriptor_set;
3881}
3882// Free all DS Pools including their Sets & related sub-structs
3883// NOTE : Calls to this function should be wrapped in mutex
3884static void deletePools(layer_data *my_data) {
3885    if (my_data->descriptorPoolMap.empty())
3886        return;
3887    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
3888        // Remove this pool's sets from setMap and delete them
3889        for (auto ds : (*ii).second->sets) {
3890            freeDescriptorSet(my_data, ds);
3891        }
3892        (*ii).second->sets.clear();
3893    }
3894    my_data->descriptorPoolMap.clear();
3895}
3896
3897static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
3898                                VkDescriptorPoolResetFlags flags) {
3899    DESCRIPTOR_POOL_NODE *pPool = getPoolNode(my_data, pool);
3900    if (!pPool) return; // Guard against an unknown pool handle. TODO: validate flags
3901    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
3902    for (auto ds : pPool->sets) {
3903        freeDescriptorSet(my_data, ds);
3904    }
3905    pPool->sets.clear();
3906    // Reset available count for each type and available sets for this pool
3907    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
3908        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
3909    }
3910    pPool->availableSets = pPool->maxSets;
3911}
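
// Illustrative effect as seen from the API, assuming this helper backs the
// vkResetDescriptorPool() path:
//
//     vkResetDescriptorPool(device, pool, 0); // all sets allocated from 'pool'
//                                             // become invalid and its full
//                                             // capacity is available again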
3912
3913// For given CB object, fetch associated CB Node from map
3914static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
3915    auto it = my_data->commandBufferMap.find(cb);
3916    if (it == my_data->commandBufferMap.end()) {
3917        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3918                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3919                "Attempt to use CommandBuffer 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(cb));
3920        return NULL;
3921    }
3922    return it->second;
3923}
3924// Free all CB Nodes
3925// NOTE : Calls to this function should be wrapped in mutex
3926static void deleteCommandBuffers(layer_data *my_data) {
3927    if (my_data->commandBufferMap.empty()) {
3928        return;
3929    }
3930    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
3931        delete (*ii).second;
3932    }
3933    my_data->commandBufferMap.clear();
3934}
3935
3936static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
3937    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
3938                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
3939                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
3940}
3941
3942bool validateCmdsInCmdBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
3943    if (!pCB->activeRenderPass)
3944        return false;
3945    bool skip_call = false;
3946    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
3947        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
3948        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3949                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3950                             "Commands cannot be called in a subpass using secondary command buffers.");
3951    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
3952        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3953                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3954                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
3955    }
3956    return skip_call;
3957}
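
// Illustrative example for the checks above. In a subpass begun with
// VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS, only vkCmdExecuteCommands(),
// vkCmdNextSubpass(), and vkCmdEndRenderPass() may be recorded:
//
//     vkCmdBeginRenderPass(cb, &rp_begin, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
//     vkCmdDraw(cb, 3, 1, 0, 0);               // flagged by the first check
//     vkCmdExecuteCommands(cb, 1, &secondary); // allowed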
3958
3959static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3960    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
3961        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3962                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3963                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
3964    return false;
3965}
3966
3967static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3968    if (!(flags & VK_QUEUE_COMPUTE_BIT))
3969        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3970                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3971                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
3972    return false;
3973}
3974
3975static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
3976    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
3977        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3978                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
3979                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.", name);
3980    return false;
3981}
3982
3983// Add specified CMD to the CmdBuffer in given pCB, flagging errors if CB is not
3984//  in the recording state or if there's an issue with the Cmd ordering
3985static bool addCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
3986    bool skip_call = false;
3987    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
3988    if (pPool) {
3989        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
3990        switch (cmd) {
3991        case CMD_BINDPIPELINE:
3992        case CMD_BINDPIPELINEDELTA:
3993        case CMD_BINDDESCRIPTORSETS:
3994        case CMD_FILLBUFFER:
3995        case CMD_CLEARCOLORIMAGE:
3996        case CMD_SETEVENT:
3997        case CMD_RESETEVENT:
3998        case CMD_WAITEVENTS:
3999        case CMD_BEGINQUERY:
4000        case CMD_ENDQUERY:
4001        case CMD_RESETQUERYPOOL:
4002        case CMD_COPYQUERYPOOLRESULTS:
4003        case CMD_WRITETIMESTAMP:
4004            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4005            break;
4006        case CMD_SETVIEWPORTSTATE:
4007        case CMD_SETSCISSORSTATE:
4008        case CMD_SETLINEWIDTHSTATE:
4009        case CMD_SETDEPTHBIASSTATE:
4010        case CMD_SETBLENDSTATE:
4011        case CMD_SETDEPTHBOUNDSSTATE:
4012        case CMD_SETSTENCILREADMASKSTATE:
4013        case CMD_SETSTENCILWRITEMASKSTATE:
4014        case CMD_SETSTENCILREFERENCESTATE:
4015        case CMD_BINDINDEXBUFFER:
4016        case CMD_BINDVERTEXBUFFER:
4017        case CMD_DRAW:
4018        case CMD_DRAWINDEXED:
4019        case CMD_DRAWINDIRECT:
4020        case CMD_DRAWINDEXEDINDIRECT:
4021        case CMD_BLITIMAGE:
4022        case CMD_CLEARATTACHMENTS:
4023        case CMD_CLEARDEPTHSTENCILIMAGE:
4024        case CMD_RESOLVEIMAGE:
4025        case CMD_BEGINRENDERPASS:
4026        case CMD_NEXTSUBPASS:
4027        case CMD_ENDRENDERPASS:
4028            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
4029            break;
4030        case CMD_DISPATCH:
4031        case CMD_DISPATCHINDIRECT:
4032            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
4033            break;
4034        case CMD_COPYBUFFER:
4035        case CMD_COPYIMAGE:
4036        case CMD_COPYBUFFERTOIMAGE:
4037        case CMD_COPYIMAGETOBUFFER:
4038        case CMD_CLONEIMAGEDATA:
4039        case CMD_UPDATEBUFFER:
4040        case CMD_PIPELINEBARRIER:
4041        case CMD_EXECUTECOMMANDS:
4042        case CMD_END:
4043            break;
4044        default:
4045            break;
4046        }
4047    }
4048    if (pCB->state != CB_RECORDING) {
4049        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
4050    } else {
4051        skip_call |= validateCmdsInCmdBuffer(my_data, pCB, cmd);
4052        CMD_NODE cmdNode = {};
4053        // Initialize cmd node and append it to the end of the command list
4054        cmdNode.cmdNumber = ++pCB->numCmds;
4055        cmdNode.type = cmd;
4056        pCB->cmds.push_back(cmdNode);
4057    }
4058    return skip_call;
4059}
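
// Illustrative case for the capability table above: recording vkCmdDispatch()
// into a command buffer whose pool was created on a queue family that reports
// VK_QUEUE_GRAPHICS_BIT but not VK_QUEUE_COMPUTE_BIT is flagged through
// CMD_DISPATCH -> checkComputeBit().
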
4060// For given object struct return a ptr of BASE_NODE type for its wrapping struct
4061BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
4062    BASE_NODE *base_ptr = nullptr;
4063    switch (object_struct.type) {
4064    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4065        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
4066        break;
4067    }
4068    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4069        base_ptr = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
4070        break;
4071    }
4072    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4073        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
4074        break;
4075    }
4076    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4077        base_ptr = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
4078        break;
4079    }
4080    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4081        base_ptr = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
4082        break;
4083    }
4084    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4085        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
4086        break;
4087    }
4088    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4089        base_ptr = getImageNode(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
4090        break;
4091    }
4092    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4093        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
4094        break;
4095    }
4096    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4097        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
4098        break;
4099    }
4100    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4101        base_ptr = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
4102        break;
4103    }
4104    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4105        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
4106        break;
4107    }
4108    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4109        base_ptr = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
4110        break;
4111    }
4112    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4113        base_ptr = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
4114        break;
4115    }
4116    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4117        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
4118        break;
4119    }
4120    default:
4121        // TODO : Any other objects to be handled here?
4122        assert(0);
4123        break;
4124    }
4125    return base_ptr;
4126}
4127
4128// Tie the VK_OBJECT to the cmd buffer which includes:
4129//  Add object_binding to cmd buffer
4130//  Add cb_binding to object
4131static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
4132    cb_bindings->insert(cb_node);
4133    cb_node->object_bindings.insert(obj);
4134}
4135// For a given object, if cb_node is in that objects cb_bindings, remove cb_node
4136static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
4137    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
4138    if (base_obj)
4139        base_obj->cb_bindings.erase(cb_node);
4140}
4141// Reset the command buffer state
4142//  Maintain the createInfo and set state to CB_NEW, but clear all other state
4143static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
4144    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
4145    if (pCB) {
4146        pCB->in_use.store(0);
4147        pCB->cmds.clear();
4148        // Reset CB state (note that createInfo is not cleared)
4149        pCB->commandBuffer = cb;
4150        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
4151        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
4152        pCB->numCmds = 0;
4153        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
4154        pCB->state = CB_NEW;
4155        pCB->submitCount = 0;
4156        pCB->status = 0;
4157        pCB->viewportMask = 0;
4158        pCB->scissorMask = 0;
4159
4160        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
4161            pCB->lastBound[i].reset();
4162        }
4163
4164        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
4165        pCB->activeRenderPass = nullptr;
4166        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
4167        pCB->activeSubpass = 0;
4168        pCB->broken_bindings.clear();
4169        pCB->waitedEvents.clear();
4170        pCB->events.clear();
4171        pCB->writeEventsBeforeWait.clear();
4172        pCB->waitedEventsBeforeQueryReset.clear();
4173        pCB->queryToStateMap.clear();
4174        pCB->activeQueries.clear();
4175        pCB->startedQueries.clear();
4176        pCB->imageSubresourceMap.clear();
4177        pCB->imageLayoutMap.clear();
4178        pCB->eventToStageMap.clear();
4179        pCB->drawData.clear();
4180        pCB->currentDrawData.buffers.clear();
4181        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
4182        // Make sure any secondaryCommandBuffers are removed from globalInFlight
4183        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
4184            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
4185        }
4186        pCB->secondaryCommandBuffers.clear();
4187        pCB->updateImages.clear();
4188        pCB->updateBuffers.clear();
4189        clear_cmd_buf_and_mem_references(dev_data, pCB);
4190        pCB->eventUpdates.clear();
4191        pCB->queryUpdates.clear();
4192
4193        // Remove object bindings
4194        for (auto obj : pCB->object_bindings) {
4195            removeCommandBufferBinding(dev_data, &obj, pCB);
4196        }
4197        pCB->object_bindings.clear();
4198        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
4199        for (auto framebuffer : pCB->framebuffers) {
4200            auto fb_node = getFramebuffer(dev_data, framebuffer);
4201            if (fb_node)
4202                fb_node->cb_bindings.erase(pCB);
4203        }
4204        pCB->framebuffers.clear();
4205        pCB->activeFramebuffer = VK_NULL_HANDLE;
4206    }
4207}
4208
4209// Set PSO-related status bits for CB, including dynamic state set via PSO
4210static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_NODE *pPipe) {
4211    // Account for any dynamic state not set via this PSO
4212    if (!pPipe->graphicsPipelineCI.pDynamicState ||
4213        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
4214        pCB->status |= CBSTATUS_ALL;
4215    } else {
4216        // First consider all state on
4217        // Then unset any state that's noted as dynamic in PSO
4218        // Finally OR that into CB statemask
4219        CBStatusFlags psoDynStateMask = CBSTATUS_ALL;
4220        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
4221            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
4222            case VK_DYNAMIC_STATE_LINE_WIDTH:
4223                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
4224                break;
4225            case VK_DYNAMIC_STATE_DEPTH_BIAS:
4226                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
4227                break;
4228            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
4229                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
4230                break;
4231            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
4232                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
4233                break;
4234            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
4235                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
4236                break;
4237            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
4238                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
4239                break;
4240            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
4241                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
4242                break;
4243            default:
4244                // TODO : Flag error here
4245                break;
4246            }
4247        }
4248        pCB->status |= psoDynStateMask;
4249    }
4250}
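
// Illustrative consequence of the masking above: a pipeline whose pDynamicState
// lists only VK_DYNAMIC_STATE_LINE_WIDTH leaves CBSTATUS_LINE_WIDTH_SET clear
// when bound, so drawing before vkCmdSetLineWidth() is called can be reported
// as missing dynamic state.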
4251
4252// Print the last bound Gfx Pipeline
4253static bool printPipeline(layer_data *my_data, const VkCommandBuffer cb) {
4254    bool skip_call = false;
4255    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4256    if (pCB) {
4257        PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
4258        if (pPipeTrav) {
4259            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
4260                                 __LINE__, DRAWSTATE_NONE, "DS", "%s",
4261                                 vk_print_vkgraphicspipelinecreateinfo(
4262                                     reinterpret_cast<const VkGraphicsPipelineCreateInfo *>(&pPipeTrav->graphicsPipelineCI), "{DS}")
4263                                     .c_str());
4264        }
4267    }
4268    return skip_call;
4269}
4270
4271static void printCB(layer_data *my_data, const VkCommandBuffer cb) {
4272    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cb);
4273    if (pCB && !pCB->cmds.empty()) {
4274        log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
4275                DRAWSTATE_NONE, "DS", "Cmds in CB 0x%p", (void *)cb);
4276        const vector<CMD_NODE> &cmds = pCB->cmds; // no need to copy the command list just to print it
4277        for (auto ii = cmds.begin(); ii != cmds.end(); ++ii) {
4278            // TODO : Need to pass cb as srcObj here
4279            log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4280                    __LINE__, DRAWSTATE_NONE, "DS", "  CMD 0x%" PRIx64 ": %s", (*ii).cmdNumber, cmdTypeToString((*ii).type).c_str());
4281        }
4282    } else {
4283        // Nothing to print
4284    }
4285}
4286
4287static bool synchAndPrintDSConfig(layer_data *my_data, const VkCommandBuffer cb) {
4288    bool skip_call = false;
4289    if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
4290        return skip_call;
4291    }
4292    skip_call |= printPipeline(my_data, cb);
4293    return skip_call;
4294}
4295
4296// Flags validation error if the associated call is made inside a render pass. The apiName
4297// routine should ONLY be called outside a render pass.
4298static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4299    bool inside = false;
4300    if (pCB->activeRenderPass) {
4301        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4302                         (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
4303                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 ")", apiName,
4304                         (uint64_t)pCB->activeRenderPass->renderPass);
4305    }
4306    return inside;
4307}
4308
4309// Flags validation error if the associated call is made outside a render pass. The apiName
4310// routine should ONLY be called inside a render pass.
4311static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName) {
4312    bool outside = false;
4313    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
4314        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
4315         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
4316        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4317                          (uint64_t)pCB->commandBuffer, __LINE__, DRAWSTATE_NO_ACTIVE_RENDERPASS, "DS",
4318                          "%s: This call must be issued inside an active render pass.", apiName);
4319    }
4320    return outside;
4321}
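
// Illustrative pairing of the two checks above: transfer commands such as
// vkCmdCopyBuffer() are validated with insideRenderPass() (they must execute
// outside a render pass), while vkCmdDraw() is validated with outsideRenderPass()
// (it must execute inside one).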
4322
4323static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
4324    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
4325}
4328
4329static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
4330    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4331        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
4332            instance_data->surfaceExtensionEnabled = true;
4333        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
4334            instance_data->displayExtensionEnabled = true;
4335#ifdef VK_USE_PLATFORM_ANDROID_KHR
4336        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
4337            instance_data->androidSurfaceExtensionEnabled = true;
4338#endif
4339#ifdef VK_USE_PLATFORM_MIR_KHR
4340        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
4341            instance_data->mirSurfaceExtensionEnabled = true;
4342#endif
4343#ifdef VK_USE_PLATFORM_WAYLAND_KHR
4344        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
4345            instance_data->waylandSurfaceExtensionEnabled = true;
4346#endif
4347#ifdef VK_USE_PLATFORM_WIN32_KHR
4348        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
4349            instance_data->win32SurfaceExtensionEnabled = true;
4350#endif
4351#ifdef VK_USE_PLATFORM_XCB_KHR
4352        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
4353            instance_data->xcbSurfaceExtensionEnabled = true;
4354#endif
4355#ifdef VK_USE_PLATFORM_XLIB_KHR
4356        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
4357            instance_data->xlibSurfaceExtensionEnabled = true;
4358#endif
4359    }
4360}
4361
4362VKAPI_ATTR VkResult VKAPI_CALL
4363CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
4364    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4365
4366    assert(chain_info->u.pLayerInfo);
4367    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4368    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
4369    if (fpCreateInstance == NULL)
4370        return VK_ERROR_INITIALIZATION_FAILED;
4371
4372    // Advance the link info for the next element on the chain
4373    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4374
4375    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
4376    if (result != VK_SUCCESS)
4377        return result;
4378
4379    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
4380    instance_data->instance = *pInstance;
4381    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);
4382
4383    instance_data->report_data = debug_report_create_instance(
4384        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
4385    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
4386    init_core_validation(instance_data, pAllocator);
4387
4388    instance_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE());
4389    ValidateLayerOrdering(*pCreateInfo);
4390
4391    return result;
4392}
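
// Note on the chaining pattern above: each layer consumes exactly one pLayerInfo element and
// advances the chain before calling down, so the next layer (or the loader terminator) sees its
// own entry. The layer's own per-instance state is only built after the downstream
// vkCreateInstance() has succeeded.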
4393
4394// Hook DestroyInstance to tear down this layer's per-instance state and logging callbacks
4395VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
4396    // TODOSC : Shouldn't need any customization here
4397    dispatch_key key = get_dispatch_key(instance);
4398    // TBD: Need any locking this early, in case this function is called at the
4399    // same time by more than one thread?
4400    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
4401    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);
4402
4403    std::lock_guard<std::mutex> lock(global_lock);
4404    // Clean up logging callback, if any
4405    while (instance_data->logging_callback.size() > 0) {
4406        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
4407        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
4408        instance_data->logging_callback.pop_back();
4409    }
4410
4411    layer_debug_report_destroy_instance(instance_data->report_data);
4412    instance_layer_data_map.erase(key); // this data lives in the instance map, not the device map
4413}
4414
4415static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
4416    uint32_t i;
4417    // TBD: Need any locking, in case this function is called at the same time
4418    // by more than one thread?
4419    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
4420    dev_data->device_extensions.wsi_enabled = false;
4421    dev_data->device_extensions.wsi_display_swapchain_enabled = false;
4422
4423    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
4424        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
4425            dev_data->device_extensions.wsi_enabled = true;
4426        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
4427            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
4428    }
4429}
4430
4431// Verify that queue family has been properly requested
4432bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu, const VkDeviceCreateInfo *create_info) {
4433    bool skip_call = false;
4434    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
4435    // First check whether the app has actually requested queueFamilyProperties
4436    if (!physical_device_state) {
4437        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4438                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
4439                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
4440    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
4441        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
4442        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
4443                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
4444                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
4445    } else {
4446        // Check that the requested queue properties are valid
4447        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
4448            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
4449            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
4450                skip_call |= log_msg(
4451                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
4452                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4453                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
4454            } else if (create_info->pQueueCreateInfos[i].queueCount >
4455                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
4456                skip_call |=
4457                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
4458                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
4459                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
4460                            "requested queueCount is %u.",
4461                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
4462                            create_info->pQueueCreateInfos[i].queueCount);
4463            }
4464        }
4465    }
4466    return skip_call;
4467}
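
// The application-side call sequence that satisfies the checks above, sketched with assumed
// variable names:
//
//     uint32_t count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);       // query family count
//     std::vector<VkQueueFamilyProperties> props(count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props.data());  // query details
//     // Any VkDeviceQueueCreateInfo must then use queueFamilyIndex < count and
//     // queueCount <= props[queueFamilyIndex].queueCount before calling vkCreateDevice()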
4468
4469// Verify that features have been queried and that they are available
4470static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys, const VkPhysicalDeviceFeatures *requested_features) {
4471    bool skip_call = false;
4472
4473    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
    if (!phys_device_state) {
        // Unknown physical device; ValidateRequestedQueueFamilyProperties() flags that case, so just avoid a null deref here
        return skip_call;
    }
4474    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
4475    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
4476    // TODO : This is a nice, compact way to loop through struct, but a bad way to report issues
4477    //  Need to provide the struct member name with the issue. To do that seems like we'll
4478    //  have to loop through each struct member which should be done w/ codegen to keep in synch.
4479    uint32_t errors = 0;
4480    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
4481    for (uint32_t i = 0; i < total_bools; i++) {
4482        if (requested[i] > actual[i]) {
4483            // TODO: Add index to struct member name helper to be able to include a feature name
4484            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4485                VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4486                "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
4487                "which is not available on this device.",
4488                i);
4489            errors++;
4490        }
4491    }
4492    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
4493        // If user didn't request features, notify them that they should
4494        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
4495        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4496                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
4497                             "DL", "You requested features that are unavailable on this device. You should first query feature "
4498                                   "availability by calling vkGetPhysicalDeviceFeatures().");
4499    }
4500    return skip_call;
4501}
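
// The loop above works because VkPhysicalDeviceFeatures is (by spec) a struct composed solely of
// VkBool32 members, so it can be walked as a flat array. The app-side pattern that avoids these
// errors is to query before requesting; a minimal sketch:
//
//     VkPhysicalDeviceFeatures supported = {};
//     vkGetPhysicalDeviceFeatures(gpu, &supported);
//     VkPhysicalDeviceFeatures enabled = {};
//     if (supported.geometryShader)
//         enabled.geometryShader = VK_TRUE;   // only request features that actually exist
//     // ...then pass &enabled as VkDeviceCreateInfo::pEnabledFeatures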
4502
4503VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
4504                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
4505    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
4506    bool skip_call = false;
4507
4508    // Check that any requested features are available
4509    if (pCreateInfo->pEnabledFeatures) {
4510        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
4511    }
4512    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);
4513
4514    if (skip_call) {
4515        return VK_ERROR_VALIDATION_FAILED_EXT;
4516    }
4517
4518    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
4519
4520    assert(chain_info->u.pLayerInfo);
4521    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
4522    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
4523    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
4524    if (fpCreateDevice == NULL) {
4525        return VK_ERROR_INITIALIZATION_FAILED;
4526    }
4527
4528    // Advance the link info for the next element on the chain
4529    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
4530
4531    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
4532    if (result != VK_SUCCESS) {
4533        return result;
4534    }
4535
4536    std::unique_lock<std::mutex> lock(global_lock);
4537    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
4538
4539    // Copy instance state into this device's layer_data struct
4540    my_device_data->instance_state = unique_ptr<INSTANCE_STATE>(new INSTANCE_STATE(*(my_instance_data->instance_state)));
4541    my_device_data->instance_data = my_instance_data;
4542    // Setup device dispatch table
4543    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
4544    my_device_data->device = *pDevice;
4545
4546    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
4547    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
4548    // Get physical device limits for this device
4549    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
4550    uint32_t count;
4551    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
4552    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
4553    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
4554        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
4555    // TODO: device limits should make sure these are compatible
4556    if (pCreateInfo->pEnabledFeatures) {
4557        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
4558    } else {
4559        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
4560    }
4561    // Store physical device mem limits into device layer_data struct
4562    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
4563    lock.unlock();
4564
4565    ValidateLayerOrdering(*pCreateInfo);
4566
4567    return result;
4568}
4569
4571VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
4572    // TODOSC : Shouldn't need any customization here
4573    dispatch_key key = get_dispatch_key(device);
4574    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
4575    // Free all the memory
4576    std::unique_lock<std::mutex> lock(global_lock);
4577    deletePipelines(dev_data);
4578    dev_data->renderPassMap.clear();
4579    deleteCommandBuffers(dev_data);
4580    // This will also delete all sets in the pool & remove them from setMap
4581    deletePools(dev_data);
4582    // All sets should be removed
4583    assert(dev_data->setMap.empty());
4584    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
4585        delete del_layout.second;
4586    }
4587    dev_data->descriptorSetLayoutMap.clear();
4588    dev_data->imageViewMap.clear();
4589    dev_data->imageMap.clear();
4590    dev_data->imageSubresourceMap.clear();
4591    dev_data->imageLayoutMap.clear();
4592    dev_data->bufferViewMap.clear();
4593    dev_data->bufferMap.clear();
4594    // Queues persist until device is destroyed
4595    dev_data->queueMap.clear();
4596    lock.unlock();
4597#if MTMERGESOURCE
4598    bool skip_call = false;
4599    lock.lock();
4600    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4601            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "Printing List details prior to vkDestroyDevice()");
4602    log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
4603            (uint64_t)device, __LINE__, MEMTRACK_NONE, "MEM", "================================================");
4604    print_mem_list(dev_data);
4605    printCBList(dev_data);
4606    // Report any memory leaks
4607    DEVICE_MEM_INFO *pInfo = NULL;
4608    if (!dev_data->memObjMap.empty()) {
4609        for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
4610            pInfo = (*ii).second.get();
4611            if (pInfo->alloc_info.allocationSize != 0) {
4612                // Valid Usage: All child objects created on device must have been destroyed prior to destroying device
4613                skip_call |=
4614                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
4615                            (uint64_t)pInfo->mem, __LINE__, MEMTRACK_MEMORY_LEAK, "MEM",
4616                            "Mem Object 0x%" PRIx64 " has not been freed. You should clean up this memory by calling "
4617                            "vkFreeMemory(0x%" PRIx64 ") prior to vkDestroyDevice().",
4618                            (uint64_t)(pInfo->mem), (uint64_t)(pInfo->mem));
4619            }
4620        }
4621    }
4622    layer_debug_report_destroy_device(device);
4623    lock.unlock();
4624
4625#if DISPATCH_MAP_DEBUG
4626    fprintf(stderr, "Device: %p, key: %p\n", (void *)device, (void *)key); // %p requires void *
4627#endif
4628    if (!skip_call) {
4629        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4630    }
4631#else
4632    dev_data->dispatch_table.DestroyDevice(device, pAllocator);
4633#endif
4634    layer_data_map.erase(key);
4635}
4636
4637static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};
4638
4639// This validates that the initial layout specified in the command buffer for the IMAGE
4640// matches the current globally-tracked layout of that IMAGE
4642static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4643    bool skip_call = false;
4644    for (auto cb_image_data : pCB->imageLayoutMap) {
4645        VkImageLayout imageLayout;
4646        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
4647            skip_call |=
4648                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4649                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
4650                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
4651        } else {
4652            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
4653                // TODO: Set memory invalid which is in mem_tracker currently
4654            } else if (imageLayout != cb_image_data.second.initialLayout) {
4655                if (cb_image_data.first.hasSubresource) {
4656                    skip_call |= log_msg(
4657                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4658                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4659                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
4660                        "with layout %s when first use is %s.",
4661                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
4662                                cb_image_data.first.subresource.arrayLayer,
4663                                cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
4664                        string_VkImageLayout(cb_image_data.second.initialLayout));
4665                } else {
4666                    skip_call |= log_msg(
4667                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4668                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
4669                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
4670                        "first use is %s.",
4671                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
4672                        string_VkImageLayout(cb_image_data.second.initialLayout));
4673                }
4674            }
4675            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
4676        }
4677    }
4678    return skip_call;
4679}
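
// An application satisfies the check above by recording a layout transition before an image's
// first use under a non-matching layout; a sketch with assumed handles:
//
//     VkImageMemoryBarrier barrier = {};
//     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;   // UNDEFINED discards contents; the check above skips it
//     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = image;
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cb, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
//                          0, 0, nullptr, 0, nullptr, 1, &barrier);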
4680
4681// Loop through bound objects and increment their in_use counts
4682//  For any unknown objects, flag an error
4683static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4684    bool skip = false;
4687    for (auto obj : cb_node->object_bindings) {
        // Reset per iteration so stale state from the previous object is never reused
        DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
        BASE_NODE *base_obj = nullptr;
4688        switch (obj.type) {
4689        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
4690            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
4691            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
4692            break;
4693        }
4694        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
4695            base_obj = getSamplerNode(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
4696            error_code = DRAWSTATE_INVALID_SAMPLER;
4697            break;
4698        }
4699        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
4700            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
4701            error_code = DRAWSTATE_INVALID_QUERY_POOL;
4702            break;
4703        }
4704        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
4705            base_obj = getPipeline(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
4706            error_code = DRAWSTATE_INVALID_PIPELINE;
4707            break;
4708        }
4709        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
4710            base_obj = getBufferNode(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
4711            error_code = DRAWSTATE_INVALID_BUFFER;
4712            break;
4713        }
4714        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
4715            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
4716            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
4717            break;
4718        }
4719        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
4720            base_obj = getImageNode(dev_data, reinterpret_cast<VkImage &>(obj.handle));
4721            error_code = DRAWSTATE_INVALID_IMAGE;
4722            break;
4723        }
4724        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
4725            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
4726            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
4727            break;
4728        }
4729        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
4730            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
4731            error_code = DRAWSTATE_INVALID_EVENT;
4732            break;
4733        }
4734        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
4735            base_obj = getPoolNode(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
4736            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
4737            break;
4738        }
4739        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
4740            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
4741            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
4742            break;
4743        }
4744        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
4745            base_obj = getFramebuffer(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
4746            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
4747            break;
4748        }
4749        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
4750            base_obj = getRenderPass(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
4751            error_code = DRAWSTATE_INVALID_RENDERPASS;
4752            break;
4753        }
4754        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
4755            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
4756            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
4757            break;
4758        }
4759        default:
4760            // TODO : Merge handling of other object types into this code
            continue; // not tracked yet -- nothing to validate or increment for this binding
4762        }
4763        if (!base_obj) {
4764            skip |=
4765                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
4766                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
4767        } else {
4768            base_obj->in_use.fetch_add(1);
4769        }
4770    }
4771    return skip;
4772}
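
// Lifetime note: each fetch_add(1) performed here is paired with a fetch_sub(1) in
// DecrementBoundResources() once RetireWorkOnQueue() sees the submission complete, so a nonzero
// in_use count means "still referenced by work the GPU may be executing".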
4773
4774// Track which resources are in-flight by atomically incrementing their "in_use" count
4775static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
4776    bool skip_call = false;
4777
4778    cb_node->in_use.fetch_add(1);
4779    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);
4780
4781    // First Increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
4782    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
4783    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
4784    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
4785    //  should then be flagged prior to calling this function
4786    for (auto drawDataElement : cb_node->drawData) {
4787        for (auto buffer : drawDataElement.buffers) {
4788            auto buffer_node = getBufferNode(dev_data, buffer);
4789            if (!buffer_node) {
4790                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
4791                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
4792                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
4793            } else {
4794                buffer_node->in_use.fetch_add(1);
4795            }
4796        }
4797    }
4798    for (auto event : cb_node->writeEventsBeforeWait) {
4799        auto event_node = getEventNode(dev_data, event);
4800        if (event_node)
4801            event_node->write_in_use++;
4802    }
4803    return skip_call;
4804}
4805
4806// Note: This function assumes that the global lock is held by the calling
4807// thread.
4808// TODO: untangle this.
4809static bool cleanInFlightCmdBuffer(layer_data *my_data, VkCommandBuffer cmdBuffer) {
4810    bool skip_call = false;
4811    GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
4812    if (pCB) {
4813        for (auto queryEventsPair : pCB->waitedEventsBeforeQueryReset) {
4814            for (auto event : queryEventsPair.second) {
4815                if (my_data->eventMap[event].needsSignaled) {
4816                    skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
4817                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
4818                                         "Cannot get query results on queryPool 0x%" PRIx64
4819                                         " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
4820                                         (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
4821                }
4822            }
4823        }
4824    }
4825    return skip_call;
4826}
4827
4828// TODO: nuke this completely.
4829// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
4830static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
4831    // Decrement in_use; only when nothing still references the CB is it removed from the global in-flight set
4832    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    if (!pCB) // CB may have been freed (and flagged elsewhere) while still in flight
        return;
4833    pCB->in_use.fetch_sub(1);
4834    if (!pCB->in_use.load()) {
4835        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
4836    }
4837}
4838
4839// Decrement in-use count for objects bound to command buffer
4840static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
4841    BASE_NODE *base_obj = nullptr;
4842    for (auto obj : cb_node->object_bindings) {
4843        base_obj = GetStateStructPtrFromObject(dev_data, obj);
4844        if (base_obj) {
4845            base_obj->in_use.fetch_sub(1);
4846        }
4847    }
4848}
4849
4850static bool RetireWorkOnQueue(layer_data *dev_data, QUEUE_NODE *pQueue, uint64_t seq)
4851{
4852    bool skip_call = false; // TODO: extract everything that might fail to precheck
4853    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
4854
4855    // Roll this queue forward, one submission at a time.
4856    while (pQueue->seq < seq) {
4857        auto & submission = pQueue->submissions.front();
4858
4859        for (auto & wait : submission.waitSemaphores) {
4860            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
4861            if (pSemaphore) pSemaphore->in_use.fetch_sub(1); // semaphore may have been destroyed while in flight
4862            auto & lastSeq = otherQueueSeqs[wait.queue];
4863            lastSeq = std::max(lastSeq, wait.seq);
4864        }
4865
4866        for (auto & semaphore : submission.signalSemaphores) {
4867            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
4868            if (pSemaphore) pSemaphore->in_use.fetch_sub(1); // semaphore may have been destroyed while in flight
4869        }
4870
4871        for (auto cb : submission.cbs) {
4872            auto cb_node = getCBNode(dev_data, cb);
4873            // First perform decrement on general case bound objects
4874            DecrementBoundResources(dev_data, cb_node);
4875            for (auto drawDataElement : cb_node->drawData) {
4876                for (auto buffer : drawDataElement.buffers) {
4877                    auto buffer_node = getBufferNode(dev_data, buffer);
4878                    if (buffer_node) {
4879                        buffer_node->in_use.fetch_sub(1);
4880                    }
4881                }
4882            }
4883            for (auto event : cb_node->writeEventsBeforeWait) {
4884                auto eventNode = dev_data->eventMap.find(event);
4885                if (eventNode != dev_data->eventMap.end()) {
4886                    eventNode->second.write_in_use--;
4887                }
4888            }
4889            for (auto queryStatePair : cb_node->queryToStateMap) {
4890                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
4891            }
4892            for (auto eventStagePair : cb_node->eventToStageMap) {
4893                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
4894            }
4895
4896            skip_call |= cleanInFlightCmdBuffer(dev_data, cb);
4897            removeInFlightCmdBuffer(dev_data, cb);
4898        }
4899
4900        auto pFence = getFenceNode(dev_data, submission.fence);
4901        if (pFence) {
4902            pFence->state = FENCE_RETIRED;
4903        }
4904
4905        pQueue->submissions.pop_front();
4906        pQueue->seq++;
4907    }
4908
4909    // Roll other queues forward to the highest seq we saw a wait for
4910    for (auto qs : otherQueueSeqs) {
4911        skip_call |= RetireWorkOnQueue(dev_data, getQueueNode(dev_data, qs.first), qs.second);
4912    }
4913
4914    return skip_call;
4915}
4916
4917
4918// Submit a fence to a queue, delimiting previous fences and previous untracked
4919// work by it.
4920static void
4921SubmitFence(QUEUE_NODE *pQueue, FENCE_NODE *pFence, uint64_t submitCount)
4922{
4923    pFence->state = FENCE_INFLIGHT;
4924    pFence->signaler.first = pQueue->queue;
4925    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
4926}
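
// Worked example of the arithmetic above (numbers assumed): with pQueue->seq == 10 and two
// submissions already pending, submitting 3 more batches fenced at the end yields
// signaler.second == 10 + 2 + 3 == 15; when the fence signals, RetireWorkOnQueue(..., 15)
// rolls this queue forward through exactly those five submissions.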
4927
4928static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
4929    bool skip_call = false;
4930    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
4931        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
4932        skip_call |=
4933            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4934                    __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
4935                    "Command Buffer 0x%" PRIx64 " is already in use and is not marked for simultaneous use.",
4936                    reinterpret_cast<uint64_t>(pCB->commandBuffer));
4937    }
4938    return skip_call;
4939}
4940
4941static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
4942    bool skip = false;
4943    if (dev_data->instance_state->disabled.command_buffer_state)
4944        return skip;
4945    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
4946    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
4947        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
4948                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
4949                        "CB 0x%" PRIxLEAST64 " was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
4950                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
4951                        (uint64_t)(pCB->commandBuffer), pCB->submitCount);
4952    }
4953    // Validate that cmd buffers have been updated
4954    if (CB_RECORDED != pCB->state) {
4955        if (CB_INVALID == pCB->state) {
4956            // Inform app of reason CB invalid
4957            for (auto obj : pCB->broken_bindings) {
4958                const char *type_str = object_type_to_string(obj.type);
4959                // Descriptor sets are a special case: either destroying or updating them can invalidate a CB
4960                const char *cause_str =
4961                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";
4962
4963                skip |=
4964                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4965                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
4966                            "You are submitting command buffer 0x%" PRIxLEAST64 " that is invalid because bound %s 0x%" PRIxLEAST64
4967                            " was %s.",
4968                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), type_str, obj.handle, cause_str);
4969            }
4970        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
4971            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4972                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
4973                            "You must call vkEndCommandBuffer() on CB 0x%" PRIxLEAST64 " before this call to %s!",
4974                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), call_source);
4975        }
4976    }
4977    return skip;
4978}
4979
4980// Validate that queueFamilyIndices of primary command buffers match this queue
4981// Secondary command buffers were previously validated in vkCmdExecuteCommands().
4982static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
4983    bool skip_call = false;
4984    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
4985    auto queue_node = getQueueNode(dev_data, queue);
4986
4987    if (pPool && queue_node && (pPool->queueFamilyIndex != queue_node->queueFamilyIndex)) {
4988        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
4989            reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
4990            "vkQueueSubmit: Primary command buffer 0x%" PRIxLEAST64
4991            " created in queue family %d is being submitted on queue 0x%" PRIxLEAST64 " from queue family %d.",
4992            reinterpret_cast<uint64_t>(pCB->commandBuffer), pPool->queueFamilyIndex,
4993            reinterpret_cast<uint64_t>(queue), queue_node->queueFamilyIndex);
4994    }
4995
4996    return skip_call;
4997}
4998
4999static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
5000    // Track in-use for resources off of primary and any secondary CBs
5001    bool skip_call = false;
5002
5003    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing
5004    // on device
5005    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);
5006
5007    skip_call |= validateAndIncrementResources(dev_data, pCB);
5008
5009    if (!pCB->secondaryCommandBuffers.empty()) {
5010        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
5011            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
5012            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
5013            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
5014                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
5015                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
5016                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
5017                        "CB 0x%" PRIxLEAST64 " was submitted with secondary buffer 0x%" PRIxLEAST64
5018                        " but that buffer has subsequently been bound to "
5019                        "primary cmd buffer 0x%" PRIxLEAST64
5020                        " and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
5021                        reinterpret_cast<uint64_t>(pCB->commandBuffer), reinterpret_cast<uint64_t>(secondaryCmdBuffer),
5022                        reinterpret_cast<uint64_t>(pSubCB->primaryCommandBuffer));
5023            }
5024        }
5025    }
5026
5027    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");
5028
5029    return skip_call;
5030}
5031
5032static bool
5033ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence)
5034{
5035    bool skip_call = false;
5036
5037    if (pFence) {
5038        if (pFence->state == FENCE_INFLIGHT) {
5039            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5040                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
5041                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
5042        } else if (pFence->state == FENCE_RETIRED) {
5045            skip_call |=
5046                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5047                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5048                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state.  Fences must be reset before being submitted",
5049                        reinterpret_cast<uint64_t &>(pFence->fence));
5050        }
5051    }
5052
5053    return skip_call;
5054}
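
// App-side, a fence that would trip these checks is recycled like so (sketch, handles assumed):
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // let the prior submission retire
//     vkResetFences(device, 1, &fence);                        // back to the unsignaled state
//     vkQueueSubmit(queue, 1, &submit_info, fence);            // now safe to reuse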
5055
5056
5057VKAPI_ATTR VkResult VKAPI_CALL
5058QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
5059    bool skip_call = false;
5060    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
5061    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
5062    std::unique_lock<std::mutex> lock(global_lock);
5063
5064    auto pQueue = getQueueNode(dev_data, queue);
5065    auto pFence = getFenceNode(dev_data, fence);
5066    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
5067
5068    if (skip_call) {
5069        return VK_ERROR_VALIDATION_FAILED_EXT;
5070    }
5071
5072    // TODO : Review these old print functions and clean up as appropriate
5073    print_mem_list(dev_data);
5074    printCBList(dev_data);
5075
5076    // Mark the fence in-use.
5077    if (pFence) {
5078        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
5079    }
5080
5081    // Now verify each individual submit
5082    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
5083        const VkSubmitInfo *submit = &pSubmits[submit_idx];
5084        vector<SEMAPHORE_WAIT> semaphore_waits;
5085        vector<VkSemaphore> semaphore_signals;
5086        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
5087            VkSemaphore semaphore = submit->pWaitSemaphores[i];
5088            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5089            if (pSemaphore) {
5090                if (pSemaphore->signaled) {
5091                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
5092                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
5093                        pSemaphore->in_use.fetch_add(1);
5094                    }
5095                    pSemaphore->signaler.first = VK_NULL_HANDLE;
5096                    pSemaphore->signaled = false;
5097                } else {
5098                    skip_call |=
5099                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5100                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5101                                "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
5102                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
5103                }
5104            }
5105        }
5106        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
5107            VkSemaphore semaphore = submit->pSignalSemaphores[i];
5108            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
5109            if (pSemaphore) {
5110                if (pSemaphore->signaled) {
5111                    skip_call |=
5112                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
5113                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
5114                                "Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
5115                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
5116                                reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore),
5117                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
5118                } else {
5119                    pSemaphore->signaler.first = queue;
5120                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
5121                    pSemaphore->signaled = true;
5122                    pSemaphore->in_use.fetch_add(1);
5123                    semaphore_signals.push_back(semaphore);
5124                }
5125            }
5126        }
5127
5128        std::vector<VkCommandBuffer> cbs;
5129
5130        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
5131            auto pCBNode = getCBNode(dev_data, submit->pCommandBuffers[i]);
5132            skip_call |= ValidateCmdBufImageLayouts(dev_data, pCBNode);
5133            if (pCBNode) {
5134                cbs.push_back(submit->pCommandBuffers[i]);
5135                for (auto secondaryCmdBuffer : pCBNode->secondaryCommandBuffers) {
5136                    cbs.push_back(secondaryCmdBuffer);
5137                }
5138
5139                pCBNode->submitCount++; // increment submit count
5140                skip_call |= validatePrimaryCommandBufferState(dev_data, pCBNode);
5141                skip_call |= validateQueueFamilyIndices(dev_data, pCBNode, queue);
5142                // Potential early exit here as bad object state may crash in delayed function calls
5143                if (skip_call)
5144                    return result;
5145                // Call submit-time functions to validate/update state
5146                for (auto &function : pCBNode->validate_functions) {
5147                    skip_call |= function();
5148                }
5149                for (auto &function : pCBNode->eventUpdates) {
5150                    skip_call |= function(queue);
5151                }
5152                for (auto &function : pCBNode->queryUpdates) {
5153                    skip_call |= function(queue);
5154                }
5155            }
5156        }
5157
5158        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
5159                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
5160    }
5161
5162    if (pFence && !submitCount) {
5163        // If no submissions, but just dropping a fence on the end of the queue,
5164        // record an empty submission with just the fence, so we can determine
5165        // its completion.
5166        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
5167                                         std::vector<SEMAPHORE_WAIT>(),
5168                                         std::vector<VkSemaphore>(),
5169                                         fence);
5170    }
5171
5172    lock.unlock();
5173    if (!skip_call)
5174        result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);
5175
5176    return result;
5177}
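
// For reference, the semaphore bookkeeping above corresponds to app-side submissions of this
// shape (handles assumed); every wait must be matched by an earlier, still-pending signal:
//
//     VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
//     VkSubmitInfo si = {};
//     si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
//     si.waitSemaphoreCount = 1;   si.pWaitSemaphores = &acquire_sem; si.pWaitDstStageMask = &wait_stage;
//     si.commandBufferCount = 1;   si.pCommandBuffers = &cb;
//     si.signalSemaphoreCount = 1; si.pSignalSemaphores = &render_done_sem;
//     vkQueueSubmit(queue, 1, &si, fence);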
5178
5179VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
5180                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
5181    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5182    VkResult result = my_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
5183    // TODO : Track allocations and overall size here
5184    std::lock_guard<std::mutex> lock(global_lock);
5185    add_mem_obj_info(my_data, device, *pMemory, pAllocateInfo);
5186    print_mem_list(my_data);
5187    return result;
5188}
5189
5190VKAPI_ATTR void VKAPI_CALL
5191FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
5192    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5193
5194    // From spec : A memory object is freed by calling vkFreeMemory() when it is no longer needed.
5195    // Before freeing a memory object, an application must ensure the memory object is no longer
5196    // in use by the device—for example by command buffers queued for execution. The memory need
5197    // not yet be unbound from all images and buffers, but any further use of those images or
5198    // buffers (on host or device) for anything other than destroying those objects will result in
5199    // undefined behavior.
5200
5201    std::unique_lock<std::mutex> lock(global_lock);
5202    bool skip_call = freeMemObjInfo(my_data, device, mem, false);
5203    print_mem_list(my_data);
5204    printCBList(my_data);
5205    lock.unlock();
5206    if (!skip_call) {
5207        my_data->dispatch_table.FreeMemory(device, mem, pAllocator);
5208    }
5209}
5210
5211// Validate that given Map memory range is valid. This means that the memory should not already be mapped,
5212//  and that the size of the map range should be:
5213//  1. Not zero
5214//  2. Within the size of the memory allocation
5215static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5216    bool skip_call = false;
5217
5218    if (size == 0) {
5219        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5220                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5221                            "VkMapMemory: Attempting to map memory range of size zero");
5222    }
5223
5224    auto mem_element = my_data->memObjMap.find(mem);
5225    if (mem_element != my_data->memObjMap.end()) {
5226        auto mem_info = mem_element->second.get();
5227        // It is an application error to call VkMapMemory on an object that is already mapped
5228        if (mem_info->mem_range.size != 0) {
5229            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5230                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5231                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
5232        }
5233
5234        // Validate that offset + size is within object's allocationSize
5235        if (size == VK_WHOLE_SIZE) {
5236            if (offset >= mem_info->alloc_info.allocationSize) {
5237                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
5238                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
5239                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
5240                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
5241                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
5242            }
5243        } else {
5244            if ((offset + size) > mem_info->alloc_info.allocationSize) {
5245                skip_call =
5246                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5247                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5248                            "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64, offset,
5249                            size + offset, mem_info->alloc_info.allocationSize);
5250            }
5251        }
5252    }
5253    return skip_call;
5254}
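
// A mapping that passes all of the checks above, sketched with assumed handles:
//
//     void *data = nullptr;
//     // offset stays inside the allocation, the object is not already mapped, size is nonzero
//     VkResult err = vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &data);
//     // ... read/write through 'data' ...
//     vkUnmapMemory(device, mem);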
5255
5256static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
5257    auto mem_info = getMemObjInfo(my_data, mem);
5258    if (mem_info) {
5259        mem_info->mem_range.offset = offset;
5260        mem_info->mem_range.size = size;
5261    }
5262}
5263
5264static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
5265    bool skip_call = false;
5266    auto mem_info = getMemObjInfo(my_data, mem);
5267    if (mem_info) {
5268        if (!mem_info->mem_range.size) {
5269            // Valid Usage: memory must currently be mapped
5270            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
5271                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
5272                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
5273        }
5274        mem_info->mem_range.size = 0;
5275        if (mem_info->shadow_copy) {
5276            free(mem_info->shadow_copy_base);
5277            mem_info->shadow_copy_base = 0;
5278            mem_info->shadow_copy = 0;
5279        }
5280    }
5281    return skip_call;
5282}
5283
5284// Guard value for pad data
5285static const char NoncoherentMemoryFillValue = 0xb;
5286
5287static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
5288                                     void **ppData) {
5289    auto mem_info = getMemObjInfo(dev_data, mem);
5290    if (mem_info) {
5291        mem_info->p_driver_data = *ppData;
5292        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
5293        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
5294            mem_info->shadow_copy = 0;
5295        } else {
5296            if (size == VK_WHOLE_SIZE) {
5297                size = mem_info->alloc_info.allocationSize - offset;
5298            }
5299            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5300            assert(vk_safe_modulo(mem_info->shadow_pad_size,
5301                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
5302            // Ensure start of mapped region reflects hardware alignment constraints
5303            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
5304
5305            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
5306            uint64_t start_offset = offset % map_alignment;
5307            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
5308            mem_info->shadow_copy_base = malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));
5309
5310            mem_info->shadow_copy =
5311                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
5312                                         ~(map_alignment - 1)) + start_offset;
5313            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
5314                                  map_alignment) == 0);
5315
5316            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
5317            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
5318        }
5319    }
5320}
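
// Resulting shadow-copy layout for the non-coherent case (left to right in memory):
//
//     shadow_copy_base -> [ alignment slack ][ front pad ][ user data (size bytes) ][ back pad ]
//     shadow_copy points at the front pad; *ppData = shadow_copy + shadow_pad_size (user data)
//
// Both pads are filled with NoncoherentMemoryFillValue so that over- and under-writes can be
// detected when the shadow copy is later compared against, and copied back to, driver memory.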
5321
5322// Verify that the state of a fence being waited on is appropriate. That is, the fence
5323//  should have been submitted on a queue or during acquire-next-image; otherwise the
5324//  wait can never complete
5325static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
5326    bool skip_call = false;
5327
5328    auto pFence = getFenceNode(dev_data, fence);
5329    if (pFence) {
5330        if (pFence->state == FENCE_UNSIGNALED) {
5331            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
5332                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
5333                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
5334                                 "acquire next image.",
5335                                 apiCall, reinterpret_cast<uint64_t &>(fence));
5336        }
5337    }
5338    return skip_call;
5339}
5340
5341static bool RetireFence(layer_data *dev_data, VkFence fence) {
5342    auto pFence = getFenceNode(dev_data, fence);
    if (!pFence) // unknown fence handle -- nothing to retire
        return false;
5343    if (pFence->signaler.first != VK_NULL_HANDLE) {
5344        /* Fence signaller is a queue -- use this as proof that prior operations
5345         * on that queue have completed.
5346         */
5347        return RetireWorkOnQueue(dev_data,
5348                                 getQueueNode(dev_data, pFence->signaler.first),
5349                                 pFence->signaler.second);
5350    }
5351    else {
5352        /* Fence signaller is the WSI. We're not tracking what the WSI op
5353         * actually /was/ in CV yet, but we need to mark the fence as retired.
5354         */
5355        pFence->state = FENCE_RETIRED;
5356        return false;
5357    }
5358}
5359
5360VKAPI_ATTR VkResult VKAPI_CALL
5361WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) {
5362    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
5363    bool skip_call = false;
5364    // Verify fence status of submitted fences
5365    std::unique_lock<std::mutex> lock(global_lock);
5366    for (uint32_t i = 0; i < fenceCount; i++) {
5367        skip_call |= verifyWaitFenceState(dev_data, pFences[i], "vkWaitForFences");
5368    }
5369    lock.unlock();
5370    if (skip_call)
5371        return VK_ERROR_VALIDATION_FAILED_EXT;
5372
5373    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);
5374
5375    if (result == VK_SUCCESS) {
5376        lock.lock();
5377        // When we know that all fences are complete we can clean/remove their CBs
5378        if (waitAll || fenceCount == 1) {
5379            for (uint32_t i = 0; i < fenceCount; i++) {
5380                skip_call |= RetireFence(dev_data, pFences[i]);
5381            }
5382        }
5383        // NOTE : Alternate case not handled here is when some fences have completed. In
5384        //  this case for app to guarantee which fences completed it will have to call
5385        //  vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
5386        lock.unlock();
5387    }
5388    if (skip_call)
5389        return VK_ERROR_VALIDATION_FAILED_EXT;
5390    return result;
5391}

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip_call = verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
    lock.lock();
    if (result == VK_SUCCESS) {
        skip_call |= RetireFence(dev_data, fence);
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return result;
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(*pQueue);
    if (result.second) {
        QUEUE_NODE *pQNode = &dev_data->queueMap[*pQueue];
        pQNode->queue = *pQueue;
        pQNode->queueFamilyIndex = queueFamilyIndex;
        pQNode->seq = 0;
    }
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pQueue = getQueueNode(dev_data, queue);
    skip_call |= RetireWorkOnQueue(dev_data, pQueue, pQueue->seq + pQueue->submissions.size());
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto &queue : dev_data->queueMap) {
        skip_call |= RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto fence_pair = dev_data->fenceMap.find(fence);
    if (fence_pair != dev_data->fenceMap.end()) {
        if (fence_pair->second.state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
                                 (uint64_t)(fence));
        }
        dev_data->fenceMap.erase(fence_pair);
    }
    lock.unlock();

    if (!skip_call)
        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
}

// For the given obj node, if it is in use, flag a validation error and return the callback result, else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct) {
    if (dev_data->instance_state->disabled.object_in_use)
        return false;
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
                        DRAWSTATE_OBJECT_INUSE, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer.",
                        object_type_to_string(obj_struct.type), obj_struct.handle);
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL
DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto sema_node = getSemaphoreNode(dev_data, semaphore);
    if (sema_node) {
        skip |= ValidateObjectNotInUse(dev_data, sema_node,
                                       {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT});
    }
    if (!skip) {
        dev_data->semaphoreMap.erase(semaphore);
        lock.unlock();
        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto event_node = getEventNode(dev_data, event);
    if (event_node) {
        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
        skip |= ValidateObjectNotInUse(dev_data, event_node, obj_struct);
        // Any bound cmd buffers are now invalid
        invalidateCommandBuffers(event_node->cb_bindings, obj_struct);
    }
    if (!skip) {
        dev_data->eventMap.erase(event);
        lock.unlock();
        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto qp_node = getQueryPoolNode(dev_data, queryPool);
    if (qp_node) {
        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
        skip |= ValidateObjectNotInUse(dev_data, qp_node, obj_struct);
        // Any bound cmd buffers are now invalid
        invalidateCommandBuffers(qp_node->cb_bindings, obj_struct);
    }
    if (!skip) {
        dev_data->queryPoolMap.erase(queryPool);
        lock.unlock();
        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
                                                   uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
                                                   VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queriesInFlight;
    std::unique_lock<std::mutex> lock(global_lock);
    for (auto cmdBuffer : dev_data->globalInFlightCmdBuffers) {
        auto pCB = getCBNode(dev_data, cmdBuffer);
        for (auto queryStatePair : pCB->queryToStateMap) {
            queriesInFlight[queryStatePair.first].push_back(cmdBuffer);
        }
    }
    bool skip_call = false;
    for (uint32_t i = 0; i < queryCount; ++i) {
        QueryObject query = {queryPool, firstQuery + i};
        auto queryElement = queriesInFlight.find(query);
        auto queryToStateElement = dev_data->queryToStateMap.find(query);
        if (queryToStateElement != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (queryElement != queriesInFlight.end() && queryToStateElement->second) {
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    auto queryEventElement = pCB->waitedEventsBeforeQueryReset.find(query);
                    if (queryEventElement == pCB->waitedEventsBeforeQueryReset.end()) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                             "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                             (uint64_t)(queryPool), firstQuery + i);
                    } else {
                        for (auto event : queryEventElement->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            // Unavailable and in flight
            } else if (queryElement != queriesInFlight.end() && !queryToStateElement->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmdBuffer : queryElement->second) {
                    auto pCB = getCBNode(dev_data, cmdBuffer);
                    make_available |= pCB->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                         "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                         (uint64_t)(queryPool), firstQuery + i);
                }
            // Unavailable and not in flight
            } else if (!queryToStateElement->second) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                     "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                     (uint64_t)(queryPool), firstQuery + i);
            }
        // Uninitialized: no state has ever been recorded for this query, so this branch
        // must hang off the outer map lookup (it was unreachable when nested inside it)
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                 "Cannot get query results on queryPool 0x%" PRIx64
                                 " with index %d as data has not been collected for this index.",
                                 (uint64_t)(queryPool), firstQuery + i);
        }
    }
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    return dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
}
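
// Worked example (comments only) of the in-flight/unavailable check above, assuming
// "available" means queryToStateMap[query] == true:
//   flags = VK_QUERY_RESULT_WAIT_BIT, make_available = true
//     -> ((PARTIAL || WAIT) && make_available) is true -> no error; the driver will wait.
//   flags = 0, make_available = true
//     -> condition is false -> error: results requested for a query still in flight
//        without asking the driver to wait or accept partial results.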

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_node = getBufferNode(my_data, buffer);
    if (!buffer_node) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_node->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
        }
    }
    return skip_call;
}

// Return true if the given ranges intersect, else false
// Prereq : For both ranges, range->end - range->start > 0. Violating that should already
//  have resulted in an error, so it is not checked here.
// When one range is linear and the other is non-linear, the comparison is padded out to
//  bufferImageGranularity. In that padded case an alias is a validation error, and
//  *skip_call may be set by the report callback, so callers that can hit the padded case
//  should merge in the returned skip_call value.
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
    *skip_call = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
        return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
        return false;

    if (range1->linear != range2->linear) {
        // In linear vs. non-linear case, it's an error to alias
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, range1->handle, 0, MEMTRACK_INVALID_ALIASING,
                    "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                           " which is in violation of the Buffer-Image Granularity section of the Vulkan specification.",
                    r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
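
// Worked example (comments only) of the granularity masking above, assuming
// bufferImageGranularity = 0x400 and a linear vs. non-linear pair:
//   range1 (linear buffer):    start = 0x0,   end = 0x3FF
//   range2 (non-linear image): start = 0x400, end = 0x7FF
//   pad_align = 0x400, so ~(pad_align - 1) = ~0x3FF masks off the low 10 bits:
//   r1_end & ~0x3FF = 0x0 and r2_start & ~0x3FF = 0x400 -> 0x0 < 0x400 -> no intersection.
//   If range2 instead started at 0x3FF, both would mask to granularity page 0x0, the
//   ranges would be treated as aliased, and the error above would be logged.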
// Simplified rangesIntersect that calls the above function to check range1 for intersection with the offset & end addresses
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/size
    MEMORY_RANGE range_wrap;
    // Sync linear with range1 to avoid padding and the potential validation error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
}
// For given mem_info, set all ranges valid that intersect the [offset, end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range;
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}
// Object with given handle is being bound to memory w/ given mem_info struct.
//  Track the newly bound memory range with given memoryOffset
//  Also scan any previous ranges, track aliased ranges with new range, and flag an error if a linear
//  and non-linear range incorrectly overlap.
// Return true if an error is flagged and the user callback returns "true", otherwise false
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    bool skip_call = false;
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing
    // Save the alias ranges so we can copy them into the final map entry below. We can't do it in the
    // loop because we don't yet have the final pointer. If we inserted into the map before the loop to get
    // the final pointer, we might enter the loop when not needed and check the range against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
            skip_call |= intersection_error;
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);

    return skip_call;
}
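
// Comment-only sketch of the alias bookkeeping above. Suppose buffer B1 occupies
// [0x0, 0xFFF] of a memory object and image I1 is then bound at [0x800, 0x17FF]:
//   - rangesIntersect(I1, B1) is true, so I1.aliases gains B1, and (via
//     tmp_alias_ranges) B1.aliases gains a pointer to the final map entry for I1.
//   - Both directions are recorded so that RemoveMemoryRange() below can unlink
//     either object without leaving a dangling alias pointer in the other.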

static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove the MEMORY_RANGE struct for the given handle from bound_ranges of mem_info
//  is_image indicates if handle is for image or buffer
//  This function will also remove the handle-to-index mapping from the appropriate
//  map and clean up any aliases for the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    if (!validateIdleBuffer(dev_data, buffer)) {
        // Clean up memory binding and range information for buffer
        auto buff_node = getBufferNode(dev_data, buffer);
        if (buff_node) {
            // Any bound cmd buffers are now invalid
            invalidateCommandBuffers(buff_node->cb_bindings,
                                     {reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
            auto mem_info = getMemObjInfo(dev_data, buff_node->mem);
            if (mem_info) {
                RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
            }
            clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
            dev_data->bufferMap.erase(buff_node->buffer);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
    }
}

static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
                                             VK_OBJECT *obj_struct) {
    if (dev_data->instance_state->disabled.destroy_buffer_view)
        return false;
    bool skip = false;
    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
    // Check the looked-up state, not the always-non-null out-parameter pointer
    if (*buffer_view_state) {
        *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct);
    }
    return skip;
}

static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
                                            VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(buffer_view_state->cb_bindings, obj_struct);
    dev_data->bufferViewMap.erase(buffer_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Common data objects used by pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        // We made the call, so update state, provided we were tracking this view
        if (buffer_view_state) {
            PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto img_node = getImageNode(dev_data, image);
    if (img_node) {
        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
        // Any bound cmd buffers are now invalid
        invalidateCommandBuffers(img_node->cb_bindings, obj_struct);
        skip |= ValidateObjectNotInUse(dev_data, img_node, obj_struct);
    }
    if (!skip) {
        // Clean up memory mapping, bindings and range references for image. Guard against an
        // untracked image: img_node may be null here, in which case there is no state to clean up.
        if (img_node) {
            auto mem_info = getMemObjInfo(dev_data, img_node->mem);
            if (mem_info) {
                RemoveImageMemoryRange(reinterpret_cast<uint64_t &>(image), mem_info);
                clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
            }
            // Remove image from imageMap
            dev_data->imageMap.erase(img_node->image);
        }
        const auto &subEntry = dev_data->imageSubresourceMap.find(image);
        if (subEntry != dev_data->imageSubresourceMap.end()) {
            for (const auto &pair : subEntry->second) {
                dev_data->imageLayoutMap.erase(pair);
            }
            dev_data->imageSubresourceMap.erase(subEntry);
        }
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName) {
    bool skip_call = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call = log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
            reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, MEMTRACK_INVALID_MEM_TYPE, "MT",
            "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
            "type (0x%X) of this memory object 0x%" PRIx64 ".",
            funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, reinterpret_cast<const uint64_t &>(mem_info->mem));
    }
    return skip_call;
}
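
// Worked example (comments only) of the memoryTypeBits test above:
//   alloc_info.memoryTypeIndex = 2  ->  (1 << 2) = 0b0100
//   memory_type_bits = 0b0101 (object supports types 0 and 2)
//     -> 0b0100 & 0b0101 = 0b0100 != 0 -> compatible, no error.
//   memory_type_bits = 0b0011 (object supports types 0 and 1)
//     -> 0b0100 & 0b0011 = 0 -> incompatible, error logged.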

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_node = getBufferNode(dev_data, buffer);
    if (buffer_node) {
        VkMemoryRequirements memRequirements;
        dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &memRequirements);
        buffer_node->mem = mem;
        buffer_node->memOffset = memoryOffset;
        buffer_node->memSize = memRequirements.size;

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, memRequirements);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "BindBufferMemory");
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, memRequirements.alignment) != 0) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_BUFFER_MEMORY_OFFSET, "DS",
                        "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                        "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                        ", returned from a call to vkGetBufferMemoryRequirements with buffer",
                        memoryOffset, memRequirements.alignment);
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {
            "minTexelBufferOffsetAlignment",
            "minUniformBufferOffsetAlignment",
            "minStorageBufferOffsetAlignment"
        };

        // Entries here must stay in the same order as usage_list, memory_type, and offset_name above
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment
        };
        VkBufferUsageFlags usage = buffer_node->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                                0, __LINE__, DRAWSTATE_INVALID_TEXEL_BUFFER_OFFSET, "DS",
                                "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                                "device limit %s 0x%" PRIxLEAST64,
                                memory_type[i], memoryOffset, offset_name[i], offset_requirement[i]);
                }
            }
        }
    }
    print_mem_list(dev_data);
    lock.unlock();
    if (!skip_call) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}
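
// Worked example (comments only) of the offset alignment checks above, assuming
// vk_safe_modulo behaves like the % operator with a guard for a zero divisor, and a
// device with minUniformBufferOffsetAlignment = 0x100 and a buffer created with
// VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT:
//   memoryOffset = 0x200 -> vk_safe_modulo(0x200, 0x100) = 0 -> OK.
//   memoryOffset = 0x104 -> vk_safe_modulo(0x104, 0x100) = 4 -> error logged against
//   the "minUniformBufferOffsetAlignment" device limit.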

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindBufferMemory
    my_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // TODO : What to track here?
    //   Could potentially save returned mem requirements and validate values passed into BindImageMemory
    my_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto view_state = getImageViewState(dev_data, imageView);
    if (view_state) {
        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(imageView), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
        skip |= ValidateObjectNotInUse(dev_data, view_state, obj_struct);
        // Any bound cmd buffers are now invalid
        invalidateCommandBuffers(view_state->cb_bindings, obj_struct);
    }
    if (!skip) {
        dev_data->imageViewMap.erase(imageView);
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto pipe_node = getPipeline(dev_data, pipeline);
    if (pipe_node) {
        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
        skip |= ValidateObjectNotInUse(dev_data, pipe_node, obj_struct);
        // Any bound cmd buffers are now invalid
        invalidateCommandBuffers(pipe_node->cb_bindings, obj_struct);
    }
    if (!skip) {
        dev_data->pipelineMap.erase(pipeline);
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto sampler_node = getSamplerNode(dev_data, sampler);
    if (sampler_node) {
        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
        skip |= ValidateObjectNotInUse(dev_data, sampler_node, obj_struct);
        // Any bound cmd buffers are now invalid
        invalidateCommandBuffers(sampler_node->cb_bindings, obj_struct);
    }
    if (!skip) {
        dev_data->samplerMap.erase(sampler);
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    // TODO : Clean up any internal data structures using this obj.
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    // TODO : Add checks for VALIDATION_ERROR_00901
    // TODO : Clean up any internal data structures using this obj.
    get_my_data_ptr(get_dispatch_key(device), layer_data_map)
        ->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
// Verify that the cmdBuffer in the given cb_node is not in the global in-flight set, and return the skip_call result
//  If this is a secondary command buffer, it is only considered in use if its primary is also in flight;
//  if the primary is not in flight, callers may remove the secondary from the global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // Primary CB or secondary where primary is also in-flight is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
                "Attempt to %s command buffer (0x%" PRIxLEAST64 ") which is in use.", action,
                reinterpret_cast<const uint64_t &>(cb_node->commandBuffer));
        }
    }
    return skip_call;
}
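
// Comment-only examples of the in-flight rule above:
//   - Resetting a primary CB that is still in globalInFlightCmdBuffers -> error.
//   - Freeing a secondary CB whose recorded primary is still in flight -> error.
//   - A secondary CB whose primary has retired is merely stale, not in use, so no
//     error is flagged here; callers erase it from the in-flight set themselves.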

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action) {
    bool skip_call = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action);
        }
    }
    return skip_call;
}

static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cmd_buffer : pPool->commandBuffers) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before any of them are freed
        if (cb_node) {
            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free");
        }
    }

    if (skip_call)
        return;

    auto pPool = getCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
            // reset prior to delete for data clean-up
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap
        pPool->commandBuffers.remove(pCommandBuffers[i]);
    }
    printCBList(dev_data);
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL
DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    // Verify that command buffers in pool are complete (not in-flight)
    auto pPool = getCommandPoolNode(dev_data, commandPool);
    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "destroy command pool with");

    if (skip_call)
        return;
    // Must remove cmdpool from cmdpoolmap, after removing all cmdbuffers in its list from the commandBufferMap
    clearCommandBuffersInFlight(dev_data, pPool);
    for (auto cb : pPool->commandBuffers) {
        clear_cmd_buf_and_mem_references(dev_data, cb);
        auto cb_node = getCBNode(dev_data, cb);
        // Remove references to this cb_node prior to delete
        // TODO : Need better solution here, resetCB?
        for (auto obj : cb_node->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, cb_node);
        }
        for (auto framebuffer : cb_node->framebuffers) {
            auto fb_node = getFramebuffer(dev_data, framebuffer);
            if (fb_node)
                fb_node->cb_bindings.erase(cb_node);
        }
        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
        delete cb_node;                       // delete CB info structure
    }
    dev_data->commandPoolMap.erase(commandPool);
    lock.unlock();

    dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pPool = getCommandPoolNode(dev_data, commandPool);
    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with");
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        lock.lock();
        clearCommandBuffersInFlight(dev_data, pPool);
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    for (uint32_t i = 0; i < fenceCount; ++i) {
        auto pFence = getFenceNode(dev_data, pFences[i]);
        if (pFence && pFence->state == FENCE_INFLIGHT) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                 "Fence 0x%" PRIx64 " is in use.", reinterpret_cast<const uint64_t &>(pFences[i]));
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);

    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < fenceCount; ++i) {
            auto pFence = getFenceNode(dev_data, pFences[i]);
            if (pFence) {
                pFence->state = FENCE_UNSIGNALED;
            }
        }
        lock.unlock();
    }

    return result;
}

// For given cb_nodes, invalidate them and track object causing invalidation
void invalidateCommandBuffers(std::unordered_set<GLOBAL_CB_NODE *> cb_nodes, VK_OBJECT obj) {
    for (auto cb_node : cb_nodes) {
        cb_node->state = CB_INVALID;
        cb_node->broken_bindings.push_back(obj);
    }
}
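
// Note (an assumption based on the naming, not confirmed in this section): marking a CB
// CB_INVALID and recording the destroyed object in broken_bindings defers the error.
// Rather than failing the destroy call itself, validation can then report at the next
// use of the command buffer exactly which object's destruction invalidated it.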

VKAPI_ATTR void VKAPI_CALL
DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto fb_node = getFramebuffer(dev_data, framebuffer);
    if (fb_node) {
        invalidateCommandBuffers(fb_node->cb_bindings,
                                 {reinterpret_cast<uint64_t &>(fb_node->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT});
        dev_data->frameBufferMap.erase(fb_node->framebuffer);
    }
    lock.unlock();
    dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    auto rp_state = getRenderPass(dev_data, renderPass);
    if (rp_state) {
        VK_OBJECT obj_struct = {reinterpret_cast<uint64_t &>(renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
        skip |= ValidateObjectNotInUse(dev_data, rp_state, obj_struct);
        // Any bound cmd buffers are now invalid
        invalidateCommandBuffers(rp_state->cb_bindings, obj_struct);
    }
    if (!skip) {
        dev_data->renderPassMap.erase(renderPass);
        lock.unlock();
        dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        // TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
        dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(*pBuffer, pCreateInfo))));
    }
    return result;
}

static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
    bool skip_call = false;
    BUFFER_NODE *buf_node = getBufferNode(dev_data, pCreateInfo->buffer);
    // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
    if (buf_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buf_node, "vkCreateBufferView()");
        // In order to create a valid buffer view, the buffer must have been created with at least one of the
        // following flags:  UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
        skip_call |= ValidateBufferUsageFlags(dev_data, buf_node,
                                              VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT,
                                              false, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
    }
    return skip_call;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        IMAGE_LAYOUT_NODE image_node;
        image_node.layout = pCreateInfo->initialLayout;
        image_node.format = pCreateInfo->format;
        dev_data->imageMap.insert(std::make_pair(*pImage, unique_ptr<IMAGE_NODE>(new IMAGE_NODE(*pImage, pCreateInfo))));
        ImageSubresourcePair subpair = {*pImage, false, VkImageSubresource()};
        dev_data->imageSubresourceMap[*pImage].push_back(subpair);
        dev_data->imageLayoutMap[subpair] = image_node;
    }
    return result;
}

static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
    /* expects global_lock to be held by caller */

    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        /* If the caller used the special values VK_REMAINING_MIP_LEVELS and
         * VK_REMAINING_ARRAY_LAYERS, resolve them now in our internal state to
         * the actual values.
         */
        if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
            range->levelCount = image_node->createInfo.mipLevels - range->baseMipLevel;
        }

        if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
            range->layerCount = image_node->createInfo.arrayLayers - range->baseArrayLayer;
        }
    }
}

// Return the correct layer/level counts if the caller used the special
// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
                                         VkImage image) {
    /* expects global_lock to be held by caller */

    *levels = range.levelCount;
    *layers = range.layerCount;
    auto image_node = getImageNode(dev_data, image);
    if (image_node) {
        if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
            *levels = image_node->createInfo.mipLevels - range.baseMipLevel;
        }
        if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
            *layers = image_node->createInfo.arrayLayers - range.baseArrayLayer;
        }
    }
}
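
// Worked example (comments only): for an image created with mipLevels = 10 and
// arrayLayers = 6, a subresource range with baseMipLevel = 3,
// levelCount = VK_REMAINING_MIP_LEVELS, baseArrayLayer = 2, and
// layerCount = VK_REMAINING_ARRAY_LAYERS resolves to
// *levels = 10 - 3 = 7 and *layers = 6 - 2 = 4.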

static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo) {
    bool skip_call = false;
    IMAGE_NODE *image_node = getImageNode(dev_data, pCreateInfo->image);
    if (image_node) {
        skip_call |= ValidateImageUsageFlags(
            dev_data, image_node, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
                                      VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
            false, "vkCreateImageView()",
            "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
        // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_node, "vkCreateImageView()");
    }
    return skip_call;
}

static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *pCreateInfo, VkImageView view) {
    dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, pCreateInfo));
    ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view]->create_info.subresourceRange, pCreateInfo->image);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateImageView(dev_data, pCreateInfo);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
        lock.unlock();
    }

    return result;
}
6446
6447VKAPI_ATTR VkResult VKAPI_CALL
6448CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
6449    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6450    VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
6451    if (VK_SUCCESS == result) {
6452        std::lock_guard<std::mutex> lock(global_lock);
6453        auto &fence_node = dev_data->fenceMap[*pFence];
6454        fence_node.fence = *pFence;
6455        fence_node.createInfo = *pCreateInfo;
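        // A fence created with VK_FENCE_CREATE_SIGNALED_BIT behaves as if it had already been
        // signaled and retired by a completed submission, hence FENCE_RETIRED rather than FENCE_UNSIGNALED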
6456        fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
6457    }
6458    return result;
6459}
6460
6461// TODO handle pipeline caches
6462VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6463                                                   const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
6464    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6465    VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
6466    return result;
6467}
6468
6469VKAPI_ATTR void VKAPI_CALL
6470DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
6471    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6472    dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
6473}
6474
6475VKAPI_ATTR VkResult VKAPI_CALL
6476GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
6477    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6478    VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
6479    return result;
6480}
6481
6482VKAPI_ATTR VkResult VKAPI_CALL
6483MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
6484    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6485    VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
6486    return result;
6487}
6488
6489// utility function to set collective state for pipeline
6490void set_pipeline_state(PIPELINE_NODE *pPipe) {
6491    // If any enabled attachment uses a constant blend factor, flag that blend constants are needed
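    // The four constant-based blend factors (VK_BLEND_FACTOR_CONSTANT_COLOR through
    // VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA) are contiguous enum values, so the range
    // compares below cover all of them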
6492    if (pPipe->graphicsPipelineCI.pColorBlendState) {
6493        for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6494            if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6495                if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6496                     (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6497                    ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6498                     (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6499                    ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6500                     (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6501                    ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6502                     (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6503                    pPipe->blendConstantsEnabled = true;
6504                }
6505            }
6506        }
6507    }
6508}
6509
6510VKAPI_ATTR VkResult VKAPI_CALL
6511CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6512                        const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6513                        VkPipeline *pPipelines) {
6514    VkResult result = VK_SUCCESS;
6515    // TODO What to do with pipelineCache?
6516    // The order of operations here is a little convoluted but gets the job done
6517    //  1. Pipeline create state is first shadowed into PIPELINE_NODE struct
6518    //  2. Create state is then validated (which uses flags setup during shadowing)
6519    //  3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
6520    bool skip_call = false;
6521    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6522    vector<PIPELINE_NODE *> pPipeNode(count);
6523    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6524
6525    uint32_t i = 0;
6526    std::unique_lock<std::mutex> lock(global_lock);
6527
6528    for (i = 0; i < count; i++) {
6529        pPipeNode[i] = new PIPELINE_NODE;
6530        pPipeNode[i]->initGraphicsPipeline(&pCreateInfos[i]);
6531        pPipeNode[i]->render_pass_ci.initialize(getRenderPass(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6532        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6533
6534        skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode, i);
6535    }
6536
6537    if (!skip_call) {
6538        lock.unlock();
6539        result =
6540            dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6541        lock.lock();
6542        for (i = 0; i < count; i++) {
6543            pPipeNode[i]->pipeline = pPipelines[i];
6544            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6545        }
6546        lock.unlock();
6547    } else {
6548        for (i = 0; i < count; i++) {
6549            delete pPipeNode[i];
6550        }
6551        lock.unlock();
6552        return VK_ERROR_VALIDATION_FAILED_EXT;
6553    }
6554    return result;
6555}
6556
6557VKAPI_ATTR VkResult VKAPI_CALL
6558CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6559                       const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6560                       VkPipeline *pPipelines) {
6561    VkResult result = VK_SUCCESS;
6562    bool skip_call = false;
6563
6564    // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
6565    vector<PIPELINE_NODE *> pPipeNode(count);
6566    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6567
6568    uint32_t i = 0;
6569    std::unique_lock<std::mutex> lock(global_lock);
6570    for (i = 0; i < count; i++) {
6571        // TODO: Verify compute stage bits
6572
6573        // Create and initialize internal tracking data structure
6574        pPipeNode[i] = new PIPELINE_NODE;
6575        pPipeNode[i]->initComputePipeline(&pCreateInfos[i]);
6576        pPipeNode[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
6577        // memcpy(&pPipeNode[i]->computePipelineCI, (const void *)&pCreateInfos[i], sizeof(VkComputePipelineCreateInfo));
6578
6579        // TODO: Add Compute Pipeline Verification
6580        skip_call |= !validate_compute_pipeline(dev_data->report_data, pPipeNode[i], &dev_data->enabled_features,
6581                                                dev_data->shaderModuleMap);
6582        // skip_call |= verifyPipelineCreateState(dev_data, device, pPipeNode[i]);
6583    }
6584
6585    if (!skip_call) {
6586        lock.unlock();
6587        result =
6588            dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6589        lock.lock();
6590        for (i = 0; i < count; i++) {
6591            pPipeNode[i]->pipeline = pPipelines[i];
6592            dev_data->pipelineMap[pPipeNode[i]->pipeline] = pPipeNode[i];
6593        }
6594        lock.unlock();
6595    } else {
6596        for (i = 0; i < count; i++) {
6597            // Clean up any locally allocated data structures
6598            delete pPipeNode[i];
6599        }
6600        lock.unlock();
6601        return VK_ERROR_VALIDATION_FAILED_EXT;
6602    }
6603    return result;
6604}
6605
6606VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6607                                             const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
6608    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6609    VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
6610    if (VK_SUCCESS == result) {
6611        std::lock_guard<std::mutex> lock(global_lock);
6612        dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_NODE>(new SAMPLER_NODE(pSampler, pCreateInfo));
6613    }
6614    return result;
6615}
6616
6617VKAPI_ATTR VkResult VKAPI_CALL
6618CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6619                          const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
6620    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6621    VkResult result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6622    if (VK_SUCCESS == result) {
6623        // TODOSC : Capture layout bindings set
6624        std::lock_guard<std::mutex> lock(global_lock);
6625        dev_data->descriptorSetLayoutMap[*pSetLayout] =
6626            new cvdescriptorset::DescriptorSetLayout(dev_data->report_data, pCreateInfo, *pSetLayout);
6627    }
6628    return result;
6629}
6630
6631// Used by CreatePipelineLayout and CmdPushConstants.
6632// Note that the index argument is optional and only used by CreatePipelineLayout.
6633static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6634                                      const char *caller_name, uint32_t index = 0) {
6635    if (dev_data->instance_state->disabled.push_constant_range)
6636        return false;
6637    uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
6638    bool skip_call = false;
6639    // Check that offset + size don't exceed the max.
6640    // Prevent arithmetic overflow here by avoiding addition and testing in this order.
6641    // TODO : This check combines VALIDATION_ERROR_00877 & 880, need to break out separately
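    // e.g. with maxPushConstantsSize = 128, offset = 120 and size = 16 is rejected because
    // 16 > 128 - 120 = 8, without ever computing offset + size (which could wrap around)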
6642    if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6643        // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6644        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6645            skip_call |=
6646                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6647                        VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u and size %u that "
6648                                                      "exceeds this device's maxPushConstantsSize of %u. %s",
6649                        caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
6650        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6651            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6652                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with offset %u and size %u that "
6653                                                                       "exceeds this device's maxPushConstantsSize of %u.",
6654                                 caller_name, offset, size, maxPushConstantsSize);
6655        } else {
6656            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6657                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6658        }
6659    }
6660    // size needs to be non-zero and a multiple of 4.
6661    // TODO : This check combines VALIDATION_ERROR_00878 & 879, need to break out separately
6662    if ((size == 0) || ((size & 0x3) != 0)) {
6663        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6664            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6665                                 VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
6666                                                               "size %u. Size must be greater than zero and a multiple of 4. %s",
6667                                 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
6668        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6669            skip_call |=
6670                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6671                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6672                                                              "size %u. Size must be greater than zero and a multiple of 4.",
6673                        caller_name, size);
6674        } else {
6675            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6676                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6677        }
6678    }
6679    // offset needs to be a multiple of 4.
6680    if ((offset & 0x3) != 0) {
6681        if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
6682            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6683                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants index %u with "
6684                                                                       "offset %u. Offset must be a multiple of 4.",
6685                                 caller_name, index, offset);
6686        } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
6687            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6688                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "%s call has push constants with "
6689                                                                       "offset %u. Offset must be a multiple of 4.",
6690                                 caller_name, offset);
6691        } else {
6692            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6693                                 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
6694        }
6695    }
6696    return skip_call;
6697}
6698
6699VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
6700                                                    const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
6701    bool skip_call = false;
6702    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6703    // TODO : Add checks for VALIDATION_ERRORS 865-871
6704    // Push Constant Range checks
6705    uint32_t i, j;
6706    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6707        skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
6708                                               pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
6709        if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
6710            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6711                                 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has no stageFlags set.");
6712        }
6713    }
6714    if (skip_call)
6715        return VK_ERROR_VALIDATION_FAILED_EXT;
6716
6717    // Each range has been validated.  Now check for overlap between ranges (if they are good).
6718    // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
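    // e.g. ranges 0:[0, 16) and 1:[8, 24) overlap because minA (0) <= minB (8) and maxA (16) > minB (8)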
6719    for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6720        for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
6721            const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
6722            const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
6723            const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
6724            const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
6725            if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
6726                skip_call |=
6727                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6728                            DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
6729                                                                  "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
6730                            i, minA, maxA, j, minB, maxB);
6731            }
6732        }
6733    }
6734
6735    VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
6736    if (VK_SUCCESS == result) {
6737        std::lock_guard<std::mutex> lock(global_lock);
6738        PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
6739        plNode.layout = *pPipelineLayout;
6740        plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
6741        for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
6742            plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
6743        }
6744        plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
6745        for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
6746            plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
6747        }
6748    }
6749    return result;
6750}
6751
6752VKAPI_ATTR VkResult VKAPI_CALL
6753CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
6754                     VkDescriptorPool *pDescriptorPool) {
6755    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6756    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
6757    if (VK_SUCCESS == result) {
6758        // Insert this pool into the global descriptor pool map
6759        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6760                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_NONE, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
6761                    (uint64_t)*pDescriptorPool))
6762            return VK_ERROR_VALIDATION_FAILED_EXT;
6763        DESCRIPTOR_POOL_NODE *pNewNode = new DESCRIPTOR_POOL_NODE(*pDescriptorPool, pCreateInfo);
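        // NOTE: plain operator new throws std::bad_alloc rather than returning NULL on failure,
        // so the check below is purely defensive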
6764        if (NULL == pNewNode) {
6765            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6766                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
6767                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_NODE in vkCreateDescriptorPool()"))
6768                return VK_ERROR_VALIDATION_FAILED_EXT;
6769        } else {
6770            std::lock_guard<std::mutex> lock(global_lock);
6771            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
6772        }
6773    } else {
6774        // TODO : Determine whether any state update is needed if pool creation fails
6775    }
6776    return result;
6777}
6778
6779VKAPI_ATTR VkResult VKAPI_CALL
6780ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
6781    // TODO : Add checks for VALIDATION_ERROR_00928
6782    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6783    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
6784    if (VK_SUCCESS == result) {
6785        std::lock_guard<std::mutex> lock(global_lock);
6786        clearDescriptorPool(dev_data, device, descriptorPool, flags);
6787    }
6788    return result;
6789}
6790// Ensure the pool contains enough descriptors and descriptor sets to satisfy
6791// an allocation request. Fills common_data with the total number of descriptors of each type required,
6792// as well as DescriptorSetLayout ptrs used for later update.
6793static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6794                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6795    if (dev_data->instance_state->disabled.allocate_descriptor_sets)
6796        return false;
6797    // All state checks for AllocateDescriptorSets are done in a single function
6798    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
6799}
6800// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
6801static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
6802                                                 VkDescriptorSet *pDescriptorSets,
6803                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
6804    // All the updates are contained in a single cvdescriptorset function
6805    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
6806                                                   &dev_data->setMap, dev_data);
6807}
6808
6809VKAPI_ATTR VkResult VKAPI_CALL
6810AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
6811    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6812    std::unique_lock<std::mutex> lock(global_lock);
6813    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
6814    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
6815    lock.unlock();
6816
6817    if (skip_call)
6818        return VK_ERROR_VALIDATION_FAILED_EXT;
6819
6820    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
6821
6822    if (VK_SUCCESS == result) {
6823        lock.lock();
6824        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
6825        lock.unlock();
6826    }
6827    return result;
6828}
6829// Verify state before freeing DescriptorSets
6830static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6831                                              const VkDescriptorSet *descriptor_sets) {
6832    if (dev_data->instance_state->disabled.free_descriptor_sets)
6833        return false;
6834    bool skip_call = false;
6835    // First make sure the sets being destroyed are not currently in use
6836    for (uint32_t i = 0; i < count; ++i)
6837        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");
6838
6839    DESCRIPTOR_POOL_NODE *pool_node = getPoolNode(dev_data, pool);
6840    if (pool_node && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_node->createInfo.flags)) {
6841        // Can't Free from a NON_FREE pool
6842        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
6843                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
6844                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
6845                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
6846                             validation_error_map[VALIDATION_ERROR_00922]);
6847    }
6848    return skip_call;
6849}
6850// Sets have been removed from the pool so update underlying state
6851static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
6852                                             const VkDescriptorSet *descriptor_sets) {
6853    DESCRIPTOR_POOL_NODE *pool_state = getPoolNode(dev_data, pool);
6854    // Update available descriptor sets in pool
6855    pool_state->availableSets += count;
6856
6857    // For each freed descriptor set, return its descriptors to the pool as available and remove the set from the pool and setMap
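    // e.g. freeing a set whose layout declares binding 0 as 4 UNIFORM_BUFFER descriptors returns 4
    // to availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER]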
6858    for (uint32_t i = 0; i < count; ++i) {
6859        auto set_state = dev_data->setMap[descriptor_sets[i]];
6860        uint32_t type_index = 0, descriptor_count = 0;
6861        for (uint32_t j = 0; j < set_state->GetBindingCount(); ++j) {
6862            type_index = static_cast<uint32_t>(set_state->GetTypeFromIndex(j));
6863            descriptor_count = set_state->GetDescriptorCountFromIndex(j);
6864            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
6865        }
6866        freeDescriptorSet(dev_data, set_state);
6867        pool_state->sets.erase(set_state);
6868    }
6869}
6870
6871VKAPI_ATTR VkResult VKAPI_CALL
6872FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
6873    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6874    // Make sure that no sets being destroyed are in-flight
6875    std::unique_lock<std::mutex> lock(global_lock);
6876    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6877    lock.unlock();
6878
6879    if (skip_call)
6880        return VK_ERROR_VALIDATION_FAILED_EXT;
6881    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
6882    if (VK_SUCCESS == result) {
6883        lock.lock();
6884        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
6885        lock.unlock();
6886    }
6887    return result;
6888}
6889// TODO : This is a proof-of-concept for the core validation architecture
6890//  Eventually these functions should be broken out into separate files, but
6891//  they are kept together here to prove out the design
6892// PreCallValidate* handles validating all of the state prior to calling down chain to UpdateDescriptorSets()
6893static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6894                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6895                                                const VkCopyDescriptorSet *pDescriptorCopies) {
6896    // First perform the required map look-ups.
6897    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
6898    //  so we can't do a single map look-up up-front; the look-ups are done individually in the functions below
6899
6900    // Now make call(s) that validate state, but don't perform state updates in this function
6901    // Note that we don't yet have a DescriptorSet class instance here, so we use a helper function in the
6902    //  cvdescriptorset namespace that parses the params and makes calls into the specific class instances
6903    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
6904                                                         descriptorCopyCount, pDescriptorCopies);
6905}
6906// PostCallRecord* handles recording state updates following call down chain to UpdateDescriptorSets()
6907static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
6908                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
6909                                               const VkCopyDescriptorSet *pDescriptorCopies) {
6910    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6911                                                 pDescriptorCopies);
6912}
6913
6914VKAPI_ATTR void VKAPI_CALL
6915UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
6916                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
6917    // Only map look-up at top level is for device-level layer_data
6918    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6919    std::unique_lock<std::mutex> lock(global_lock);
6920    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6921                                                         pDescriptorCopies);
6922    lock.unlock();
6923    if (!skip_call) {
6924        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6925                                                      pDescriptorCopies);
6926        lock.lock();
6927        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
6928        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
6929                                           pDescriptorCopies);
6930    }
6931}
6932
6933VKAPI_ATTR VkResult VKAPI_CALL
6934AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
6935    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6936    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
6937    if (VK_SUCCESS == result) {
6938        std::unique_lock<std::mutex> lock(global_lock);
6939        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);
6940
6941        if (pPool) {
6942            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
6943                // Add command buffer to its commandPool map
6944                pPool->commandBuffers.push_back(pCommandBuffer[i]);
6945                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
6946                // Add command buffer to map
6947                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
6948                resetCB(dev_data, pCommandBuffer[i]);
6949                pCB->createInfo = *pCreateInfo;
6950                pCB->device = device;
6951            }
6952        }
6953        printCBList(dev_data);
6954        lock.unlock();
6955    }
6956    return result;
6957}
6958
6959// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
6960static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_NODE *fb_state) {
6961    fb_state->cb_bindings.insert(cb_state);
6962    auto rp_state = getRenderPass(dev_data, fb_state->createInfo.renderPass);
6963    if (rp_state) {
6964        addCommandBufferBinding(
6965            &rp_state->cb_bindings,
6966            {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT}, cb_state);
6967    }
6968    for (auto attachment : fb_state->attachments) {
6969        auto view_state = attachment.view_state;
6970        if (view_state) {
6971            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
6972        }
6973    }
6974}
6975
6976VKAPI_ATTR VkResult VKAPI_CALL
6977BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
6978    bool skip_call = false;
6979    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
6980    std::unique_lock<std::mutex> lock(global_lock);
6981    // Validate command buffer level
6982    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
6983    if (cb_node) {
6984        // vkBeginCommandBuffer() implicitly resets the command buffer, so make sure any fence has completed and then clear memory references
6985        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
6986            skip_call |=
6987                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
6988                        (uint64_t)commandBuffer, __LINE__, MEMTRACK_RESET_CB_WHILE_IN_FLIGHT, "MEM",
6989                        "Calling vkBeginCommandBuffer() on active CB 0x%p before it has completed. "
6990                        "You must check CB fence before this call.",
6991                        commandBuffer);
6992        }
6993        clear_cmd_buf_and_mem_references(dev_data, cb_node);
6994        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
6995            // Secondary Command Buffer
6996            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
6997            if (!pInfo) {
6998                skip_call |=
6999                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7000                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7001                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info.",
7002                            reinterpret_cast<void *>(commandBuffer));
7003            } else {
7004                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
7005                    if (!pInfo->renderPass) { // renderpass should NOT be null for a Secondary CB
7006                        skip_call |= log_msg(
7007                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7008                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7009                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must specify a valid renderpass parameter.",
7010                            reinterpret_cast<void *>(commandBuffer));
7011                    }
7012                    if (!pInfo->framebuffer) { // framebuffer may be null for a Secondary CB, but this affects perf
7013                        skip_call |= log_msg(
7014                            dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7015                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7016                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) may perform better if a "
7017                            "valid framebuffer parameter is specified.",
7018                            reinterpret_cast<void *>(commandBuffer));
7019                    } else {
7020                        string errorString = "";
7021                        auto framebuffer = getFramebuffer(dev_data, pInfo->framebuffer);
7022                        if (framebuffer) {
7023                            if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
7024                                !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
7025                                                                 getRenderPass(dev_data, pInfo->renderPass)->createInfo.ptr(),
7026                                                                 errorString)) {
7027                                // renderPass that framebuffer was created with must be compatible with local renderPass
7028                                skip_call |= log_msg(
7029                                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7030                                    VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7031                                    __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
7032                                    "vkBeginCommandBuffer(): Secondary Command "
7033                                    "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
7034                                    "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
7035                                    reinterpret_cast<void *>(commandBuffer), reinterpret_cast<const uint64_t &>(pInfo->renderPass),
7036                                    reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
7037                                    reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass), errorString.c_str());
7038                            }
7039                            // Connect this framebuffer and its children to this cmdBuffer
7040                            AddFramebufferBinding(dev_data, cb_node, framebuffer);
7041                        }
7042                    }
7043                }
7044                if ((pInfo->occlusionQueryEnable == VK_FALSE ||
7045                     dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
7046                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
7047                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
7048                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
7049                                         __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7050                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
7051                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQueryEnable is VK_FALSE or the device does not "
7052                                         "support precise occlusion queries.",
7053                                         reinterpret_cast<void *>(commandBuffer));
7054                }
7055            }
7056            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
7057                auto renderPass = getRenderPass(dev_data, pInfo->renderPass);
7058                if (renderPass) {
7059                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
7060                        skip_call |= log_msg(
7061                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7062                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7063                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have a subpass index (%u) "
7064                            "that is less than the number of subpasses (%u).",
7065                            (void *)commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount);
7066                    }
7067                }
7068            }
7069        }
7070        if (CB_RECORDING == cb_node->state) {
7071            skip_call |=
7072                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7073                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
7074                        "vkBeginCommandBuffer(): Cannot call Begin on CB (0x%" PRIxLEAST64
7075                        ") in the RECORDING state. Must first call vkEndCommandBuffer().",
7076                        (uint64_t)commandBuffer);
7077        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && !cb_node->cmds.empty() && CMD_END == cb_node->cmds.back().type)) {
7078            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
7079            auto pPool = getCommandPoolNode(dev_data, cmdPool);
7080            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7081                skip_call |=
7082                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7083                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7084                            "Call to vkBeginCommandBuffer() on command buffer (0x%" PRIxLEAST64
7085                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
7086                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7087                            (uint64_t)commandBuffer, (uint64_t)cmdPool);
7088            }
7089            resetCB(dev_data, commandBuffer);
7090        }
7091        // Set updated state here in case implicit reset occurs above
7092        cb_node->state = CB_RECORDING;
7093        cb_node->beginInfo = *pBeginInfo;
7094        if (cb_node->beginInfo.pInheritanceInfo) {
7095            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
7096            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
7097            // If we are a secondary command buffer and inheriting, update the items we should inherit.
7098            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
7099                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7100                cb_node->activeRenderPass = getRenderPass(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
7101                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
7102                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
7103            }
7104        }
7105    } else {
7106        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7107                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
7108                             "vkBeginCommandBuffer(): Unable to find CommandBuffer Node for CB 0x%p!", (void *)commandBuffer);
7109    }
7110    lock.unlock();
7111    if (skip_call) {
7112        return VK_ERROR_VALIDATION_FAILED_EXT;
7113    }
7114    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);
7115
7116    return result;
7117}
7118
7119VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
7120    bool skip_call = false;
7121    VkResult result = VK_SUCCESS;
7122    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7123    std::unique_lock<std::mutex> lock(global_lock);
7124    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7125    if (pCB) {
7126        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) || !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
7127            // This needs spec clarification to update valid usage, see comments in PR:
7128            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
7129            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer");
7130        }
7131        skip_call |= addCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
7132        for (auto query : pCB->activeQueries) {
7133            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7134                                 DRAWSTATE_INVALID_QUERY, "DS",
7135                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d",
7136                                 (uint64_t)(query.pool), query.index);
7137        }
7138    }
7139    if (!skip_call) {
7140        lock.unlock();
7141        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
7142        lock.lock();
7143        if (VK_SUCCESS == result) {
7144            pCB->state = CB_RECORDED;
7145            // Reset CB status flags
7146            pCB->status = 0;
7147            printCB(dev_data, commandBuffer);
7148        }
7149    } else {
7150        result = VK_ERROR_VALIDATION_FAILED_EXT;
7151    }
7152    lock.unlock();
7153    return result;
7154}
7155
7156VKAPI_ATTR VkResult VKAPI_CALL
7157ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
7158    bool skip_call = false;
7159    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7160    std::unique_lock<std::mutex> lock(global_lock);
7161    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7162    VkCommandPool cmdPool = pCB->createInfo.commandPool;
7163    auto pPool = getCommandPoolNode(dev_data, cmdPool);
7164    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
7165        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
7166                             (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER_RESET, "DS",
7167                             "Attempt to reset command buffer (0x%" PRIxLEAST64 ") created from command pool (0x%" PRIxLEAST64
7168                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.",
7169                             (uint64_t)commandBuffer, (uint64_t)cmdPool);
7170    }
7171    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset");
7172    lock.unlock();
7173    if (skip_call)
7174        return VK_ERROR_VALIDATION_FAILED_EXT;
7175    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
7176    if (VK_SUCCESS == result) {
7177        lock.lock();
7178        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
7179        resetCB(dev_data, commandBuffer);
7180        lock.unlock();
7181    }
7182    return result;
7183}
7184
7185VKAPI_ATTR void VKAPI_CALL
7186CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
7187    bool skip_call = false;
7188    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7189    std::unique_lock<std::mutex> lock(global_lock);
7190    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7191    if (pCB) {
7192        skip_call |= addCmd(dev_data, pCB, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
7193        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (pCB->activeRenderPass)) {
7194            skip_call |=
7195                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7196                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
7197                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
7198                        (uint64_t)pipeline, (uint64_t)pCB->activeRenderPass->renderPass);
7199        }
7200
7201        PIPELINE_NODE *pPN = getPipeline(dev_data, pipeline);
7202        if (pPN) {
7203            pCB->lastBound[pipelineBindPoint].pipeline_node = pPN;
7204            set_cb_pso_status(pCB, pPN);
7205            set_pipeline_state(pPN);
7206            // Only record the cmd buffer binding for a known pipeline; the failed look-up must not be dereferenced
7207            addCommandBufferBinding(&pPN->cb_bindings,
7208                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, pCB);
7209        } else {
7210            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
7211                                 (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_PIPELINE, "DS",
7212                                 "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist!", (uint64_t)(pipeline));
7213        }
7213    }
7214    lock.unlock();
7215    if (!skip_call)
7216        dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
7217}
7218
7219VKAPI_ATTR void VKAPI_CALL
7220CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
7221    bool skip_call = false;
7222    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7223    std::unique_lock<std::mutex> lock(global_lock);
7224    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7225    if (pCB) {
7226        skip_call |= addCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
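        // Mark viewports [firstViewport, firstViewport + viewportCount) as statically set;
        // e.g. firstViewport = 1, viewportCount = 2 sets mask bits 0b0110 (viewports 1 and 2)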
7227        pCB->viewportMask |= ((1u<<viewportCount) - 1u) << firstViewport;
7228    }
7229    lock.unlock();
7230    if (!skip_call)
7231        dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
7232}
7233
7234VKAPI_ATTR void VKAPI_CALL
7235CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
7236    bool skip_call = false;
7237    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7238    std::unique_lock<std::mutex> lock(global_lock);
7239    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7240    if (pCB) {
7241        skip_call |= addCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
7242        pCB->scissorMask |= ((1u<<scissorCount) - 1u) << firstScissor;
7243    }
7244    lock.unlock();
7245    if (!skip_call)
7246        dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
7247}
7248
7249VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
7250    bool skip_call = false;
7251    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7252    std::unique_lock<std::mutex> lock(global_lock);
7253    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7254    if (pCB) {
7255        skip_call |= addCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
7256        pCB->status |= CBSTATUS_LINE_WIDTH_SET;
7257
7258        PIPELINE_NODE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_node;
7259        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
7260            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
7261                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SET, "DS",
7262                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
7263                                 "flag. This is undefined behavior and the line width set here may be ignored.");
7264        } else {
7265            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
7266        }
7267    }
7268    lock.unlock();
7269    if (!skip_call)
7270        dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
7271}
7272
7273VKAPI_ATTR void VKAPI_CALL
7274CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
7275    bool skip_call = false;
7276    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7277    std::unique_lock<std::mutex> lock(global_lock);
7278    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7279    if (pCB) {
7280        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
7281        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
7282    }
7283    lock.unlock();
7284    if (!skip_call)
7285        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
7286}
7287
7288VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
7289    bool skip_call = false;
7290    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7291    std::unique_lock<std::mutex> lock(global_lock);
7292    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7293    if (pCB) {
7294        skip_call |= addCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
7295        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
7296    }
7297    lock.unlock();
7298    if (!skip_call)
7299        dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
7300}
7301
7302VKAPI_ATTR void VKAPI_CALL
7303CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
7304    bool skip_call = false;
7305    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7306    std::unique_lock<std::mutex> lock(global_lock);
7307    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7308    if (pCB) {
7309        skip_call |= addCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
7310        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
7311    }
7312    lock.unlock();
7313    if (!skip_call)
7314        dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
7315}
7316
7317VKAPI_ATTR void VKAPI_CALL
7318CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
7319    bool skip_call = false;
7320    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7321    std::unique_lock<std::mutex> lock(global_lock);
7322    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7323    if (pCB) {
7324        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
7325        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
7326    }
7327    lock.unlock();
7328    if (!skip_call)
7329        dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
7330}
7331
7332VKAPI_ATTR void VKAPI_CALL
7333CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
7334    bool skip_call = false;
7335    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7336    std::unique_lock<std::mutex> lock(global_lock);
7337    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7338    if (pCB) {
7339        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
7340        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
7341    }
7342    lock.unlock();
7343    if (!skip_call)
7344        dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
7345}
7346
7347VKAPI_ATTR void VKAPI_CALL
7348CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
7349    bool skip_call = false;
7350    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
7351    std::unique_lock<std::mutex> lock(global_lock);
7352    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
7353    if (pCB) {
7354        skip_call |= addCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
7355        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
7356    }
7357    lock.unlock();
7358    if (!skip_call)
7359        dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
7360}

VKAPI_ATTR void VKAPI_CALL
CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                      const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
            }
            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            auto pipeline_layout = getPipelineLayout(dev_data, layout);
            for (uint32_t i = 0; i < setCount; i++) {
                cvdescriptorset::DescriptorSet *pSet = getSetNode(dev_data, pDescriptorSets[i]);
                if (pSet) {
                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = pSet;
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_NONE, "DS", "DS 0x%" PRIxLEAST64 " bound on pipeline %s",
                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!pSet->IsUpdated() && (pSet->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "DS 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, pSet, pipeline_layout, i + firstSet, errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str());
                    }

                    auto setDynamicDescriptorCount = pSet->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], pSet->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < pSet->GetTotalDescriptorCount(); d++) {
                                if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_UNIFORM_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                } else if (pSet->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__,
                                            DRAWSTATE_INVALID_STORAGE_BUFFER_OFFSET, "DS",
                                            "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64,
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;
                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind DS 0x%" PRIxLEAST64 " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[i]);
                }
                skip_call |= addCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) { // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             pipeline_layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet DS 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet DS 0x%" PRIxLEAST64
                                                          " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                                          " newly bound as set #%u so set #%u and any subsequent sets were "
                                                          "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                            "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
                            "is %u. It should exactly match the number of dynamic descriptors.",
                            setCount, totalDynamicDescriptors, dynamicOffsetCount);
            }
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
                                                       pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
}
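
// Illustrative sketch (not part of the layer): a bind that satisfies the checks above,
// supplying exactly one dynamic offset per dynamic descriptor, each a multiple of the
// device's min*BufferOffsetAlignment limits. "cmd_buf", "layout_handle", and "sets" are
// hypothetical.
//
//     const uint32_t dynamic_offsets[2] = {0, 256}; // one dynamic UBO in each of the two sets
//     vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, layout_handle,
//                             /*firstSet=*/0, /*setCount=*/2, sets,
//                             /*dynamicOffsetCount=*/2, dynamic_offsets);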

VKAPI_ATTR void VKAPI_CALL
CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that IBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto buff_node = getBufferNode(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindIndexBuffer()");
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= addCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch a bad enum; the alignment error below also fires if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
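
// Illustrative sketch (not part of the layer): an index-buffer bind that passes the
// alignment check above. VK_INDEX_TYPE_UINT32 requires a 4-byte-aligned offset and
// VK_INDEX_TYPE_UINT16 a 2-byte-aligned one. "cmd_buf" and "index_buf" are hypothetical.
//
//     vkCmdBindIndexBuffer(cmd_buf, index_buf, /*offset=*/256, VK_INDEX_TYPE_UINT32);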

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}
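
// For example, binding two vertex buffers at firstBinding = 1 grows currentDrawData.buffers
// to three entries and fills slots [1] and [2]; slot [0] keeps whatever was bound before.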

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                uint32_t bindingCount, const VkBuffer *pBuffers,
                                                const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO : Somewhere need to verify that VBs have correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buff_node = getBufferNode(dev_data, pBuffers[i]);
            assert(buff_node);
            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdBindVertexBuffers()");
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buff_node, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= addCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
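
// Illustrative sketch (not part of the layer): a matching application-side call.
// "cmd_buf" and "vertex_bufs" (an array of two valid VkBuffer handles) are hypothetical.
//
//     const VkDeviceSize offsets[2] = {0, 0};
//     vkCmdBindVertexBuffers(cmd_buf, /*firstBinding=*/0, /*bindingCount=*/2, vertex_bufs, offsets);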

/* expects global_lock to be held by caller */
static bool markStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;

    for (auto imageView : pCB->updateImages) {
        auto view_state = getImageViewState(dev_data, imageView);
        if (!view_state)
            continue;

        auto img_node = getImageNode(dev_data, view_state->create_info.image);
        assert(img_node);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, img_node, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buff_node = getBufferNode(dev_data, buffer);
        assert(buff_node);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buff_node, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    return skip_call;
}
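
// Note: the lambdas queued above do not run here; they are replayed when the command
// buffer is later validated at submit time, marking storage-image and storage-buffer
// memory as written.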

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAW, "vkCmdDraw()");
        pCB->drawCount[DRAW]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDraw");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDraw() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDraw");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
}

VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
                                          uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
                                          uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= addCmd(dev_data, pCB, CMD_DRAWINDEXED, "vkCmdDrawIndexed()");
        pCB->drawCount[DRAW_INDEXED]++;
        skip_call |= validate_and_update_draw_state(dev_data, pCB, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexed");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndexed() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDEXED]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(pCB);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdDrawIndexed");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
}

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDIRECT, "vkCmdDrawIndirect()");
        cb_node->drawCount[DRAW_INDIRECT]++;
        skip_call |= validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_NONE, "DS",
                             "vkCmdDrawIndirect() call 0x%" PRIx64 ", reporting DS state:", g_drawCount[DRAW_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndirect()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
}
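
// Illustrative sketch (not part of the layer): the indirect parameters the GPU reads from
// "buffer" at "offset". "cmd_buf" and "indirect_buf" are hypothetical; the buffer must
// have been created with VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT.
//
//     // A VkDrawIndirectCommand written at offset 0 of the indirect buffer:
//     //   { vertexCount = 3, instanceCount = 1, firstVertex = 0, firstInstance = 0 }
//     vkCmdDrawIndirect(cmd_buf, indirect_buf, /*offset=*/0, /*count=*/1,
//                       /*stride=*/sizeof(VkDrawIndirectCommand));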

VKAPI_ATTR void VKAPI_CALL
CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDrawIndexedIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()");
        cb_node->drawCount[DRAW_INDEXED_INDIRECT]++;
        skip_call |=
            validate_and_update_draw_state(dev_data, cb_node, true, VK_PIPELINE_BIND_POINT_GRAPHICS, "vkCmdDrawIndexedIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        // TODO : Need to pass commandBuffer as srcObj here
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, DRAWSTATE_NONE, "DS", "vkCmdDrawIndexedIndirect() call 0x%" PRIx64 ", reporting DS state:",
                    g_drawCount[DRAW_INDEXED_INDIRECT]++);
        skip_call |= synchAndPrintDSConfig(dev_data, commandBuffer);
        if (!skip_call) {
            updateResourceTrackingOnDraw(cb_node);
        }
        skip_call |= outsideRenderPass(dev_data, cb_node, "vkCmdDrawIndexedIndirect()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
}

VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= validate_and_update_draw_state(dev_data, pCB, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatch");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, pCB);
        skip_call |= addCmd(dev_data, pCB, CMD_DISPATCH, "vkCmdDispatch()");
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdDispatch");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
}

VKAPI_ATTR void VKAPI_CALL
CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto buff_node = getBufferNode(dev_data, buffer);
    if (cb_node && buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buff_node, "vkCmdDispatchIndirect()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, buff_node);
        skip_call |=
            validate_and_update_draw_state(dev_data, cb_node, false, VK_PIPELINE_BIND_POINT_COMPUTE, "vkCmdDispatchIndirect");
        skip_call |= markStoreImagesAndBuffersAsWritten(dev_data, cb_node);
        skip_call |= addCmd(dev_data, cb_node, CMD_DISPATCHINDIRECT, "vkCmdDispatchIndirect()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdDispatchIndirect()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
                                         uint32_t regionCount, const VkBufferCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
    if (cb_node && src_buff_node && dst_buff_node) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBuffer()");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyBuffer()");
        // Update bindings between buffers and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
        // Validate that SRC & DST buffers have correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyBuffer()",
                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");

        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()");
    } else {
        // ParamChecker will flag errors on invalid objects; just assert here as a debugging aid
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
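
// Illustrative sketch (not part of the layer): a region copy that the usage-flag and
// memory-binding checks above validate. "cmd_buf", "staging_buf", and "device_buf" are
// hypothetical.
//
//     VkBufferCopy region = {};
//     region.srcOffset = 0;
//     region.dstOffset = 0;
//     region.size = 65536;
//     vkCmdCopyBuffer(cmd_buf, staging_buf, device_buf, 1, &region);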

static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, srcImage, sub, node)) {
            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "when it doesn't match the current layout %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // TODO : Can we deal with image node from the top of call tree and avoid map look-up here?
            auto image_node = getImageNode(dev_data, srcImage);
            if (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for input image is %s but can only be "
                                                                       "TRANSFER_SRC_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(srcImageLayout));
        }
    }
    return skip_call;
}

static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, destImage, sub, node)) {
            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy to an image whose dest layout is %s when it "
                                                                        "doesn't match the current layout %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_node = getImageNode(dev_data, destImage);
            if (image_node->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Layout for output image is %s but can only be "
                                                                       "TRANSFER_DST_OPTIMAL or GENERAL.",
                                 string_VkImageLayout(destImageLayout));
        }
    }
    return skip_call;
}

// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
    bool result = true;
    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
        (extent->depth != other_extent->depth)) {
        result = false;
    }
    return result;
}

// Returns the image extent of a specific subresource.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_NODE *img, const VkImageSubresourceLayers *subresource) {
    const uint32_t mip = subresource->mipLevel;
    VkExtent3D extent = img->createInfo.extent;
    extent.width = std::max(1U, extent.width >> mip);
    extent.height = std::max(1U, extent.height >> mip);
    extent.depth = std::max(1U, extent.depth >> mip);
    return extent;
}
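
// For example, a 16x16x1 image queried at mipLevel 3 yields an extent of (2, 2, 1): each
// dimension is shifted right by the mip level and clamped to a minimum of 1.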

// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentZero(const VkExtent3D *extent) {
    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}

// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_NODE *img) {
    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
    VkExtent3D granularity = { 0, 0, 0 };
    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pPool) {
        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if (vk_format_is_compressed(img->createInfo.format)) {
            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
            granularity.width *= block_size.width;
            granularity.height *= block_size.height;
        }
    }
    return granularity;
}
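
// For example (assuming a 4x4 compressed block size such as BC1): a queue-family
// granularity of (4, 4, 1) would be scaled to (16, 16, 1) for that image.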

// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
    bool valid = true;
    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
        valid = false;
    }
    return valid;
}

// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    VkExtent3D offset_extent = {};
    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
        if (IsExtentZero(&offset_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, offset->x, offset->y, offset->z);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
        // integer multiples of the image transfer granularity.
        if (IsExtentAligned(&offset_extent, granularity) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
                            granularity->depth);
        }
    }
    return skip;
}

// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
                                  const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
        // subresource extent.
        if (IsExtentEqual(extent, subresource_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
                            subresource_extent->height, subresource_extent->depth);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
        // subresource extent dimensions.
        VkExtent3D offset_extent_sum = {};
        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
        }
    }
    return skip;
}

// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
                        "transfer granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%" PRIdLEAST64
                        ") must be an even integer multiple of this command buffer's queue family image transfer "
                        "granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                    const IMAGE_NODE *img, const VkImageCopy *region,
                                                                    const uint32_t i, const char *function) {
    bool skip = false;
    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
                           "extent");
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                          const IMAGE_NODE *img, const VkBufferImageCopy *region,
                                                                          const uint32_t i, const char *function) {
    bool skip = false;
    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
    skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
    skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
    skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
    skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
    skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
                           function, "imageExtent");
    return skip;
}

VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdCopyImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImage()"); };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout);
            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
                                                                          "vkCmdCopyImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions);
}
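
// Illustrative sketch (not part of the layer): a single-region color copy that satisfies
// the layout and granularity checks above. "cmd_buf", "src_image", and "dst_image" are
// hypothetical.
//
//     VkImageCopy region = {};
//     region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0, /*baseArrayLayer=*/0, /*layerCount=*/1};
//     region.dstSubresource = region.srcSubresource;
//     region.extent = {256, 256, 1};
//     vkCmdCopyImage(cmd_buf, src_image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
//                    dst_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);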

// Validate that an image's sampleCount matches the requirement for a specific API call
static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_NODE *image_node, VkSampleCountFlagBits sample_count,
                                            const char *location) {
    bool skip = false;
    if (image_node->createInfo.samples != sample_count) {
        skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                       reinterpret_cast<uint64_t &>(image_node->image), 0, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                       "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s.", location,
                       reinterpret_cast<uint64_t &>(image_node->image),
                       string_VkSampleCountFlagBits(image_node->createInfo.samples), string_VkSampleCountFlagBits(sample_count));
    }
    return skip;
}

VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_img_node = getImageNode(dev_data, srcImage);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_img_node && dst_img_node) {
        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage");
        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdBlitImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdBlitImage()");
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, "vkCmdBlitImage()",
                                             "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() { return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdBlitImage()"); };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()");
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
}

VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_node = getBufferNode(dev_data, srcBuffer);
    auto dst_img_node = getImageNode(dev_data, dstImage);
    if (cb_node && src_buff_node && dst_img_node) {
        skip_call |= ValidateImageSampleCount(dev_data, dst_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyBufferToImage(): dstImage");
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_node, "vkCmdCopyBufferToImage()");
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdCopyBufferToImage()");
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_node);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
        skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_node, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                              "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_img_node, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, dst_img_node, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_node, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= addCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()");
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_img_node, &pRegions[i], i,
                                                                                "vkCmdCopyBufferToImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}
8247
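// Validate state for vkCmdCopyImageToBuffer: the mirror of vkCmdCopyBufferToImage, with the source
// image and destination buffer roles swapped.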
8248VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
8249                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
8250                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
8251    bool skip_call = false;
8252    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8253    std::unique_lock<std::mutex> lock(global_lock);
8254
8255    auto cb_node = getCBNode(dev_data, commandBuffer);
8256    auto src_img_node = getImageNode(dev_data, srcImage);
8257    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8258    if (cb_node && src_img_node && dst_buff_node) {
8259        skip_call |= ValidateImageSampleCount(dev_data, src_img_node, VK_SAMPLE_COUNT_1_BIT, "vkCmdCopyImageToBuffer(): srcImage");
8260        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8261        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyImageToBuffer()");
8262        // Update bindings between buffer/image and cmd buffer
8263        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8264        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8265        // Validate that SRC image & DST buffer have correct usage flags set
8266        skip_call |= ValidateImageUsageFlags(dev_data, src_img_node, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
8267                                             "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
8268        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8269                                              "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8270        std::function<bool()> function = [=]() {
8271            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdCopyImageToBuffer()");
8272        };
8273        cb_node->validate_functions.push_back(function);
8274        function = [=]() {
8275            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8276            return false;
8277        };
8278        cb_node->validate_functions.push_back(function);
8279
8280        skip_call |= addCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
8281        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()");
8282        for (uint32_t i = 0; i < regionCount; ++i) {
8283            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout);
8284            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_img_node, &pRegions[i], i,
8285                                                                                "vkCmdCopyImageToBuffer()");
8286        }
8287    } else {
8288        assert(0);
8289    }
8290    lock.unlock();
8291    if (!skip_call)
8292        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
8293}
8294
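// Validate state for vkCmdUpdateBuffer: dst buffer must be bound to memory and created with
// VK_BUFFER_USAGE_TRANSFER_DST_BIT; on success the buffer's memory contents are marked valid.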
8295VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
8296                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
8297    bool skip_call = false;
8298    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8299    std::unique_lock<std::mutex> lock(global_lock);
8300
8301    auto cb_node = getCBNode(dev_data, commandBuffer);
8302    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8303    if (cb_node && dst_buff_node) {
8304        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdUpdateBuffer()");
8305        // Update bindings between buffer and cmd buffer
8306        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8307        // Validate that DST buffer has correct usage flags set
8308        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
8309                                              "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8310        std::function<bool()> function = [=]() {
8311            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8312            return false;
8313        };
8314        cb_node->validate_functions.push_back(function);
8315
8316        skip_call |= addCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
8317        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()");
8318    } else {
8319        assert(0);
8320    }
8321    lock.unlock();
8322    if (!skip_call)
8323        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
8324}
8325
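// Validate state for vkCmdFillBuffer: same dst-buffer requirements as vkCmdUpdateBuffer.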
8326VKAPI_ATTR void VKAPI_CALL
8327CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
8328    bool skip_call = false;
8329    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8330    std::unique_lock<std::mutex> lock(global_lock);
8331
8332    auto cb_node = getCBNode(dev_data, commandBuffer);
8333    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
8334    if (cb_node && dst_buff_node) {
8335        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdFillBuffer()");
8336        // Update bindings between buffer and cmd buffer
8337        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
8338        // Validate that DST buffer has correct usage flags set
8339        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "vkCmdFillBuffer()",
8340                                              "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
8341        std::function<bool()> function = [=]() {
8342            SetBufferMemoryValid(dev_data, dst_buff_node, true);
8343            return false;
8344        };
8345        cb_node->validate_functions.push_back(function);
8346
8347        skip_call |= addCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
8348        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()");
8349    } else {
8350        assert(0);
8351    }
8352    lock.unlock();
8353    if (!skip_call)
8354        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
8355}
8356
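// Validate vkCmdClearAttachments: must be issued inside a render pass, and every cleared attachment
// must be referenced by the active subpass. A full-render-area clear before any draw also triggers a
// performance warning, since LOAD_OP_CLEAR at render pass begin is generally cheaper.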
8357VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
8358                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
8359                                               const VkClearRect *pRects) {
8360    bool skip_call = false;
8361    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8362    std::unique_lock<std::mutex> lock(global_lock);
8363    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8364    if (pCB) {
8365        skip_call |= addCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
8366        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
8367        if (!hasDrawCmd(pCB) && (rectCount > 0) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
8368            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
8369            // There are times an app needs to use ClearAttachments (generally when reusing an attachment inside a render pass).
8370            // TODO: Can we make this warning more specific? We'd like to avoid triggering it when we can tell the app had no
8371            // choice but to call CmdClearAttachments.
8372            // Otherwise this is really just a performance warning.
8373            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
8374                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer),
8375                                 __LINE__, DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
8376                                 "vkCmdClearAttachments() issued on CB object 0x%" PRIxLEAST64 " prior to any Draw Cmds."
8377                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
8378                                 (uint64_t)(commandBuffer));
8379        }
8380        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()");
8381    }
8382
8383    // Validate that attachment is in reference list of active subpass
8384    if (pCB && pCB->activeRenderPass) {
8385        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
8386        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
8387
8388        for (uint32_t attachment_idx = 0; attachment_idx < attachmentCount; attachment_idx++) {
8389            const VkClearAttachment *attachment = &pAttachments[attachment_idx];
8390            if (attachment->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
8391                if (attachment->colorAttachment >= pSD->colorAttachmentCount) {
8392                    skip_call |= log_msg(
8393                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8394                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8395                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d; ignored",
8396                        attachment->colorAttachment, pCB->activeSubpass);
8397                }
8398                else if (pSD->pColorAttachments[attachment->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
8399                    skip_call |= log_msg(
8400                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8401                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8402                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored",
8403                        attachment->colorAttachment);
8404                }
8405            } else if (attachment->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
8406                if (!pSD->pDepthStencilAttachment || // No DS attachment referenced by active subpass
8407                    (pSD->pDepthStencilAttachment->attachment ==
8408                     VK_ATTACHMENT_UNUSED)) { // DS attachment reference is VK_ATTACHMENT_UNUSED
8409
8410                    skip_call |= log_msg(
8411                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
8412                        (uint64_t)commandBuffer, __LINE__, DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
8413                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
8414                }
8415            }
8416        }
8417    }
8418    lock.unlock();
8419    if (!skip_call)
8420        dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
8421}
8422
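// Validate state for vkCmdClearColorImage: image must be bound to memory; the clear marks the
// image's memory contents valid.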
8423VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
8424                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
8425                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
8426    bool skip_call = false;
8427    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8428    std::unique_lock<std::mutex> lock(global_lock);
8429    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8430
8431    auto cb_node = getCBNode(dev_data, commandBuffer);
8432    auto img_node = getImageNode(dev_data, image);
8433    if (cb_node && img_node) {
8434        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearColorImage()");
8435        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
8436        std::function<bool()> function = [=]() {
8437            SetImageMemoryValid(dev_data, img_node, true);
8438            return false;
8439        };
8440        cb_node->validate_functions.push_back(function);
8441
8442        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
8443        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()");
8444    } else {
8445        assert(0);
8446    }
8447    lock.unlock();
8448    if (!skip_call)
8449        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
8450}
8451
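// Validate state for vkCmdClearDepthStencilImage: same requirements as vkCmdClearColorImage.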
8452VKAPI_ATTR void VKAPI_CALL
8453CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
8454                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
8455                          const VkImageSubresourceRange *pRanges) {
8456    bool skip_call = false;
8457    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8458    std::unique_lock<std::mutex> lock(global_lock);
8459    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
8460
8461    auto cb_node = getCBNode(dev_data, commandBuffer);
8462    auto img_node = getImageNode(dev_data, image);
8463    if (cb_node && img_node) {
8464        skip_call |= ValidateMemoryIsBoundToImage(dev_data, img_node, "vkCmdClearDepthStencilImage()");
8465        AddCommandBufferBindingImage(dev_data, cb_node, img_node);
8466        std::function<bool()> function = [=]() {
8467            SetImageMemoryValid(dev_data, img_node, true);
8468            return false;
8469        };
8470        cb_node->validate_functions.push_back(function);
8471
8472        skip_call |= addCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
8473        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()");
8474    } else {
8475        assert(0);
8476    }
8477    lock.unlock();
8478    if (!skip_call)
8479        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
8480}
8481
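// Validate state for vkCmdResolveImage: both images must be bound to memory; src contents must be
// valid when the command buffer executes, and the resolve marks dst contents valid.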
8482VKAPI_ATTR void VKAPI_CALL
8483CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
8484                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
8485    bool skip_call = false;
8486    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8487    std::unique_lock<std::mutex> lock(global_lock);
8488
8489    auto cb_node = getCBNode(dev_data, commandBuffer);
8490    auto src_img_node = getImageNode(dev_data, srcImage);
8491    auto dst_img_node = getImageNode(dev_data, dstImage);
8492    if (cb_node && src_img_node && dst_img_node) {
8493        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_img_node, "vkCmdResolveImage()");
8494        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_img_node, "vkCmdResolveImage()");
8495        // Update bindings between images and cmd buffer
8496        AddCommandBufferBindingImage(dev_data, cb_node, src_img_node);
8497        AddCommandBufferBindingImage(dev_data, cb_node, dst_img_node);
8498        std::function<bool()> function = [=]() {
8499            return ValidateImageMemoryIsValid(dev_data, src_img_node, "vkCmdResolveImage()");
8500        };
8501        cb_node->validate_functions.push_back(function);
8502        function = [=]() {
8503            SetImageMemoryValid(dev_data, dst_img_node, true);
8504            return false;
8505        };
8506        cb_node->validate_functions.push_back(function);
8507
8508        skip_call |= addCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
8509        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()");
8510    } else {
8511        assert(0);
8512    }
8513    lock.unlock();
8514    if (!skip_call)
8515        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
8516                                                 pRegions);
8517}
8518
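// Submit-time callback: record the stage mask an event was last set with, in both the command
// buffer's and the executing queue's event-to-stage maps. Always returns false (never skips).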
8519bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8520    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8521    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8522    if (pCB) {
8523        pCB->eventToStageMap[event] = stageMask;
8524    }
8525    auto queue_data = dev_data->queueMap.find(queue);
8526    if (queue_data != dev_data->queueMap.end()) {
8527        queue_data->second.eventToStageMap[event] = stageMask;
8528    }
8529    return false;
8530}
8531
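// Validate vkCmdSetEvent (not allowed inside a render pass) and queue a setEventStageMask update
// on pCB->eventUpdates to run at submit time.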
8532VKAPI_ATTR void VKAPI_CALL
8533CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8534    bool skip_call = false;
8535    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8536    std::unique_lock<std::mutex> lock(global_lock);
8537    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8538    if (pCB) {
8539        skip_call |= addCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
8540        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()");
8541        auto event_node = getEventNode(dev_data, event);
8542        if (event_node) {
8543            addCommandBufferBinding(&event_node->cb_bindings,
8544                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8545            event_node->cb_bindings.insert(pCB);
8546        }
8547        pCB->events.push_back(event);
8548        if (!pCB->waitedEvents.count(event)) {
8549            pCB->writeEventsBeforeWait.push_back(event);
8550        }
8551        std::function<bool(VkQueue)> eventUpdate =
8552            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
8553        pCB->eventUpdates.push_back(eventUpdate);
8554    }
8555    lock.unlock();
8556    if (!skip_call)
8557        dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
8558}
8559
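// Validate vkCmdResetEvent; the queued submit-time update records a stage mask of 0 for the event.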
8560VKAPI_ATTR void VKAPI_CALL
8561CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
8562    bool skip_call = false;
8563    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8564    std::unique_lock<std::mutex> lock(global_lock);
8565    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8566    if (pCB) {
8567        skip_call |= addCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
8568        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()");
8569        auto event_node = getEventNode(dev_data, event);
8570        if (event_node) {
8571            addCommandBufferBinding(&event_node->cb_bindings,
8572                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
8573            event_node->cb_bindings.insert(pCB);
8574        }
8575        pCB->events.push_back(event);
8576        if (!pCB->waitedEvents.count(event)) {
8577            pCB->writeEventsBeforeWait.push_back(event);
8578        }
8579        std::function<bool(VkQueue)> eventUpdate =
8580            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
8581        pCB->eventUpdates.push_back(eventUpdate);
8582    }
8583    lock.unlock();
8584    if (!skip_call)
8585        dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
8586}
8587
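// Record the layout transitions requested by image memory barriers in the command buffer's layout
// map. A transition is flagged if its oldLayout disagrees with the currently tracked layout;
// oldLayout == VK_IMAGE_LAYOUT_UNDEFINED is always accepted.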
8588static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8589                                   const VkImageMemoryBarrier *pImgMemBarriers) {
8590    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8591    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8592    bool skip = false;
8593    uint32_t levelCount = 0;
8594    uint32_t layerCount = 0;
8595
8596    for (uint32_t i = 0; i < memBarrierCount; ++i) {
8597        auto mem_barrier = &pImgMemBarriers[i];
8598        if (!mem_barrier)
8599            continue;
8600        // TODO: Do not iterate over every possibility - consolidate where
8601        // possible
8602        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);
8603
8604        for (uint32_t j = 0; j < levelCount; j++) {
8605            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
8606            for (uint32_t k = 0; k < layerCount; k++) {
8607                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
8608                VkImageSubresource sub = {mem_barrier->subresourceRange.aspectMask, level, layer};
8609                IMAGE_CMD_BUF_LAYOUT_NODE node;
8610                if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
8611                    SetLayout(pCB, mem_barrier->image, sub,
8612                              IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
8613                    continue;
8614                }
8615                if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
8616                    // TODO: Set memory invalid which is in mem_tracker currently
8617                } else if (node.layout != mem_barrier->oldLayout) {
8618                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8619                                    __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "You cannot transition the layout from %s "
8620                                                                                    "when current layout is %s.",
8621                                    string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
8622                }
8623                SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
8624            }
8625        }
8626    }
8627    return skip;
8628}
8629
8630// Print readable FlagBits in FlagMask
8631static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
8632    std::string result;
8633    std::string separator;
8634
8635    if (accessMask == 0) {
8636        result = "[None]";
8637    } else {
8638        result = "[";
8639        for (auto i = 0; i < 32; i++) {
8640            if (accessMask & (1 << i)) {
8641                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
8642                separator = " | ";
8643            }
8644        }
8645        result = result + "]";
8646    }
8647    return result;
8648}
8649
8650// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
8651// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
8652// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
8653static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8654                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
8655                             const char *type) {
8656    bool skip_call = false;
8657
8658    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
8659        if (accessMask & ~(required_bit | optional_bits)) {
8660            // TODO: Verify against Valid Use
8661            skip_call |=
8662                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8663                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8664                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8665        }
8666    } else {
8667        if (!required_bit) {
8668            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8669                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must contain at least one of access bits %d "
8670                                                                  "%s when layout is %s, unless the app has previously added a "
8671                                                                  "barrier for this transition.",
8672                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
8673                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
8674        } else {
8675            std::string opt_bits;
8676            if (optional_bits != 0) {
8677                std::stringstream ss;
8678                ss << optional_bits;
8679                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
8680            }
8681            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8682                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s AccessMask %d %s must have required access bit %d %s %s when "
8683                                                                  "layout is %s, unless the app has previously added a barrier for "
8684                                                                  "this transition.",
8685                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
8686                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
8687        }
8688    }
8689    return skip_call;
8690}
8691
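// Validate a barrier's accessMask against the layout being transitioned from/to. For example, a
// transition to VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL is expected to carry VK_ACCESS_TRANSFER_WRITE_BIT,
// while VK_IMAGE_LAYOUT_GENERAL accepts any mask.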
8692static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
8693                                        const VkImageLayout &layout, const char *type) {
8694    bool skip_call = false;
8695    switch (layout) {
8696    case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
8697        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
8698                                      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT, type);
8699        break;
8700    }
8701    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
8702        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
8703                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, type);
8704        break;
8705    }
8706    case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
8707        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
8708        break;
8709    }
8710    case VK_IMAGE_LAYOUT_PREINITIALIZED: {
8711        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_HOST_WRITE_BIT, 0, type);
8712        break;
8713    }
8714    case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
8715        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8716                                      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8717        break;
8718    }
8719    case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
8720        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
8721                                      VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
8722        break;
8723    }
8724    case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
8725        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
8726        break;
8727    }
8728    case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
8729        skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
8730        break;
8731    }
8732    case VK_IMAGE_LAYOUT_UNDEFINED: {
8733        if (accessMask != 0) {
8734            // TODO: Verify against Valid Use section spec
8735            skip_call |=
8736                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8737                        DRAWSTATE_INVALID_BARRIER, "DS", "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.",
8738                        type, accessMask, string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
8739        }
8740        break;
8741    }
8742    case VK_IMAGE_LAYOUT_GENERAL:
8743    default: { break; }
8744    }
8745    return skip_call;
8746}
8747
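// Shared barrier validation for vkCmdWaitEvents/vkCmdPipelineBarrier: subpass self-dependency
// requirements, queue family index rules for CONCURRENT vs. EXCLUSIVE sharing modes, accessMask
// consistency with layout transitions, aspect mask vs. image format, subresource range bounds,
// and buffer barrier offset/size bounds.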
8748static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
8749                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
8750                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
8751                             const VkImageMemoryBarrier *pImageMemBarriers) {
8752    bool skip_call = false;
8753    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
8754    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
8755    if (pCB->activeRenderPass && memBarrierCount) {
8756        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
8757            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8758                                 DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
8759                                                                  "with no self dependency specified.",
8760                                 funcName, pCB->activeSubpass);
8761        }
8762    }
8763    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
8764        auto mem_barrier = &pImageMemBarriers[i];
8765        auto image_data = getImageNode(dev_data, mem_barrier->image);
8766        if (image_data) {
8767            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
8768            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
8769            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
8770                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
8771                // be VK_QUEUE_FAMILY_IGNORED
8772                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
8773                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8774                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8775                                         "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
8776                                         "VK_SHARING_MODE_CONCURRENT.  Src and dst "
8777                                         "VK_SHARING_MODE_CONCURRENT. Src and dst "
8778                                         "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
8779                }
8780            } else {
8781                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
8782                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
8783                // or both be a valid queue family
8784                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
8785                    (src_q_f_index != dst_q_f_index)) {
8786                    skip_call |=
8787                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8788                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
8789                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
8790                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
8791                                                                     "must be.",
8792                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
8793                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
8794                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8795                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
8796                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8797                                         __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8798                                         "%s: Image 0x%" PRIx64 " was created with sharingMode "
8799                                         "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
8800                                         " or dstQueueFamilyIndex %d is greater than " PRINTF_SIZE_T_SPECIFIER
8801                                         " queueFamilies created for this device.",
8802                                         funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index,
8803                                         dst_q_f_index, dev_data->phys_dev_properties.queue_family_properties.size());
8804                }
8805            }
8806        }
8807
8808        if (mem_barrier) {
8809            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
8810                skip_call |=
8811                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
8812                skip_call |=
8813                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
8814            }
8815            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
8816                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8817                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
8818                                                         "PREINITIALIZED.",
8819                        funcName);
8820            }
8821            auto image_data = getImageNode(dev_data, mem_barrier->image);
8822            VkFormat format = VK_FORMAT_UNDEFINED;
8823            uint32_t arrayLayers = 0, mipLevels = 0;
8824            bool imageFound = false;
8825            if (image_data) {
8826                format = image_data->createInfo.format;
8827                arrayLayers = image_data->createInfo.arrayLayers;
8828                mipLevels = image_data->createInfo.mipLevels;
8829                imageFound = true;
8830            } else if (dev_data->device_extensions.wsi_enabled) {
8831                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
8832                if (imageswap_data) {
8833                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
8834                    if (swapchain_data) {
8835                        format = swapchain_data->createInfo.imageFormat;
8836                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
8837                        mipLevels = 1;
8838                        imageFound = true;
8839                    }
8840                }
8841            }
8842            if (imageFound) {
8843                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
8844                if (vk_format_is_depth_or_stencil(format)) {
8845                    if (vk_format_is_depth_and_stencil(format)) {
8846                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) && !(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8847                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8848                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
8849                                    "%s: Image is a depth and stencil format and thus must "
8850                                    "have either one or both of VK_IMAGE_ASPECT_DEPTH_BIT and "
8851                                    "VK_IMAGE_ASPECT_STENCIL_BIT set.",
8852                                    funcName);
8853                        }
8854                    } else if (vk_format_is_depth_only(format)) {
8855                        if (!(aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT)) {
8856                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8857                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a depth-only format and thus must "
8858                                                                               "have VK_IMAGE_ASPECT_DEPTH_BIT set.",
8859                                    funcName);
8860                        }
8861                    } else { // stencil-only case
8862                        if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
8863                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
8864                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a stencil-only format and thus must "
8865                                                                               "have VK_IMAGE_ASPECT_STENCIL_BIT set.",
8866                                    funcName);
8867                        }
8868                    }
8869                } else { // image is a color format
8870                    if (!(aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT)) {
8871                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8872                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image is a color format and thus must "
8873                                                                 "have VK_IMAGE_ASPECT_COLOR_BIT set.",
8874                                funcName);
8875                    }
8876                }
8877                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
8878                                     ? 1
8879                                     : mem_barrier->subresourceRange.layerCount;
8880                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
8881                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8882                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the "
8883                                                             "baseArrayLayer (%d) and layerCount (%d) be less "
8884                                                             "than or equal to the total number of layers (%d).",
8885                            funcName, mem_barrier->subresourceRange.baseArrayLayer, mem_barrier->subresourceRange.layerCount,
8886                            arrayLayers);
8887                }
8888                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
8889                                     ? 1
8890                                     : mem_barrier->subresourceRange.levelCount;
8891                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
8892                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8893                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Subresource must have the sum of the baseMipLevel "
8894                                                             "(%d) and levelCount (%d) be less than or equal to "
8895                                                             "the total number of levels (%d).",
8896                            funcName, mem_barrier->subresourceRange.baseMipLevel, mem_barrier->subresourceRange.levelCount,
8897                            mipLevels);
8898                }
8899            }
8900        }
8901    }
8902    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
8903        auto mem_barrier = &pBufferMemBarriers[i];
8904        if (pCB->activeRenderPass) {
8905            skip_call |=
8906                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8907                        DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
8908        }
8909        if (!mem_barrier)
8910            continue;
8911
8912        // Validate buffer barrier queue family indices
8913        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8914             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
8915            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
8916             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
8917            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8918                                 DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
8919                                 "%s: Buffer Barrier 0x%" PRIx64 " has QueueFamilyIndex greater "
8920                                 "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
8921                                 funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8922                                 dev_data->phys_dev_properties.queue_family_properties.size());
8923        }
8924
8925        auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
8926        if (buffer_node) {
8927            auto buffer_size = buffer_node->memSize;
8928            if (mem_barrier->offset >= buffer_size) {
8929                skip_call |= log_msg(
8930                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8931                    DRAWSTATE_INVALID_BARRIER, "DS",
8932                    "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
8933                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8934                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(buffer_size));
8935            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
8936                skip_call |= log_msg(
8937                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8938                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
8939                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
8940                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
8941                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
8942                    reinterpret_cast<const uint64_t &>(buffer_size));
8943            }
8944        }
8945    }
8946    return skip_call;
8947}
8948
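// Submit-time check for vkCmdWaitEvents: srcStageMask must equal the OR of the stage masks the
// awaited events were set with (optionally with VK_PIPELINE_STAGE_HOST_BIT for host-set events).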
8949bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex, VkPipelineStageFlags sourceStageMask) {
8950    bool skip_call = false;
8951    VkPipelineStageFlags stageMask = 0;
8952    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
8953    for (uint32_t i = 0; i < eventCount; ++i) {
8954        auto event = pCB->events[firstEventIndex + i];
8955        auto queue_data = dev_data->queueMap.find(queue);
8956        if (queue_data == dev_data->queueMap.end())
8957            return false;
8958        auto event_data = queue_data->second.eventToStageMap.find(event);
8959        if (event_data != queue_data->second.eventToStageMap.end()) {
8960            stageMask |= event_data->second;
8961        } else {
8962            auto global_event_data = getEventNode(dev_data, event);
8963            if (!global_event_data) {
8964                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
8965                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
8966                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
8967                                     reinterpret_cast<const uint64_t &>(event));
8968            } else {
8969                stageMask |= global_event_data->stageMask;
8970            }
8971        }
8972    }
8973    // TODO: Need to validate that host_bit is only set if set event is called
8974    // but set event can be called at any time.
8975    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
8976        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
8977                             DRAWSTATE_INVALID_EVENT, "DS", "Submitting cmdbuffer with call to VkCmdWaitEvents "
8978                                                            "using srcStageMask 0x%X which must be the bitwise "
8979                                                            "OR of the stageMask parameters used in calls to "
8980                                                            "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
8981                                                            "used with vkSetEvent but instead is 0x%X.",
8982                             sourceStageMask, stageMask);
8983    }
8984    return skip_call;
8985}
8986
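// Track the awaited events, queue the submit-time stage-mask check above, then record layout
// transitions and run the shared barrier validation.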
8987VKAPI_ATTR void VKAPI_CALL
8988CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask,
8989              VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
8990              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
8991              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
8992    bool skip_call = false;
8993    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
8994    std::unique_lock<std::mutex> lock(global_lock);
8995    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
8996    if (pCB) {
8997        auto firstEventIndex = pCB->events.size();
8998        for (uint32_t i = 0; i < eventCount; ++i) {
8999            auto event_node = getEventNode(dev_data, pEvents[i]);
9000            if (event_node) {
9001                addCommandBufferBinding(&event_node->cb_bindings,
9002                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
9003                                        pCB);
9004                event_node->cb_bindings.insert(pCB);
9005            }
9006            pCB->waitedEvents.insert(pEvents[i]);
9007            pCB->events.push_back(pEvents[i]);
9008        }
9009        std::function<bool(VkQueue)> eventUpdate =
9010            std::bind(validateEventStageMask, std::placeholders::_1, pCB, eventCount, firstEventIndex, sourceStageMask);
9011        pCB->eventUpdates.push_back(eventUpdate);
9012        if (pCB->state == CB_RECORDING) {
9013            skip_call |= addCmd(dev_data, pCB, CMD_WAITEVENTS, "vkCmdWaitEvents()");
9014        } else {
9015            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
9016        }
9017        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9018        skip_call |=
9019            ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9020                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9021    }
9022    lock.unlock();
9023    if (!skip_call)
9024        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
9025                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9026                                               imageMemoryBarrierCount, pImageMemoryBarriers);
9027}
9028
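// Validate vkCmdPipelineBarrier: record image layout transitions and run the shared barrier checks.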
9029VKAPI_ATTR void VKAPI_CALL
9030CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
9031                   VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
9032                   uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
9033                   uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
9034    bool skip_call = false;
9035    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9036    std::unique_lock<std::mutex> lock(global_lock);
9037    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9038    if (pCB) {
9039        skip_call |= addCmd(dev_data, pCB, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
9040        skip_call |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
9041        skip_call |=
9042            ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
9043                             pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
9044    }
9045    lock.unlock();
9046    if (!skip_call)
9047        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
9048                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
9049                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
9050}
9051
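// Submit-time callback: mark a query available (or unavailable) in both the command buffer's and
// the executing queue's query-state maps. Always returns false (never skips).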
9052bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
9053    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9054    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9055    if (pCB) {
9056        pCB->queryToStateMap[object] = value;
9057    }
9058    auto queue_data = dev_data->queueMap.find(queue);
9059    if (queue_data != dev_data->queueMap.end()) {
9060        queue_data->second.queryToStateMap[object] = value;
9061    }
9062    return false;
9063}
9064
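// Track the query as active and started for vkCmdBeginQuery, and bind the query pool to this
// command buffer.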
9065VKAPI_ATTR void VKAPI_CALL
9066CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
9067    bool skip_call = false;
9068    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9069    std::unique_lock<std::mutex> lock(global_lock);
9070    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9071    if (pCB) {
9072        QueryObject query = {queryPool, slot};
9073        pCB->activeQueries.insert(query);
9074        if (!pCB->startedQueries.count(query)) {
9075            pCB->startedQueries.insert(query);
9076        }
9077        skip_call |= addCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
9078        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9079                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9080    }
9081    lock.unlock();
9082    if (!skip_call)
9083        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
9084}
9085
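// Validate vkCmdEndQuery: the query must currently be active in this command buffer; a queued
// submit-time update then marks it available.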
9086VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
9087    bool skip_call = false;
9088    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9089    std::unique_lock<std::mutex> lock(global_lock);
9090    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9091    if (pCB) {
9092        QueryObject query = {queryPool, slot};
9093        if (!pCB->activeQueries.count(query)) {
9094            skip_call |=
9095                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9096                        DRAWSTATE_INVALID_QUERY, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d",
9097                        (uint64_t)(queryPool), slot);
9098        } else {
9099            pCB->activeQueries.erase(query);
9100        }
9101        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9102        pCB->queryUpdates.push_back(queryUpdate);
9103        if (pCB->state == CB_RECORDING) {
9104            skip_call |= addCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
9105        } else {
9106            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
9107        }
9108        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9109                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9110    }
9111    lock.unlock();
9112    if (!skip_call)
9113        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
9114}
9115
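// Validate vkCmdResetQueryPool (not allowed inside a render pass); queued submit-time updates mark
// each query in the range unavailable.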
9116VKAPI_ATTR void VKAPI_CALL
9117CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
9118    bool skip_call = false;
9119    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9120    std::unique_lock<std::mutex> lock(global_lock);
9121    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9122    if (pCB) {
9123        for (uint32_t i = 0; i < queryCount; i++) {
9124            QueryObject query = {queryPool, firstQuery + i};
9125            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
9126            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
9127            pCB->queryUpdates.push_back(queryUpdate);
9128        }
9129        if (pCB->state == CB_RECORDING) {
9130            skip_call |= addCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
9131        } else {
9132            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
9133        }
9134        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()");
9135        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9136                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
9137    }
9138    lock.unlock();
9139    if (!skip_call)
9140        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
9141}
9142
9143bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
9144    bool skip_call = false;
9145    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
9146    auto queue_data = dev_data->queueMap.find(queue);
9147    if (queue_data == dev_data->queueMap.end())
9148        return false;
9149    for (uint32_t i = 0; i < queryCount; i++) {
9150        QueryObject query = {queryPool, firstQuery + i};
9151        auto query_data = queue_data->second.queryToStateMap.find(query);
9152        bool fail = false;
9153        if (query_data != queue_data->second.queryToStateMap.end()) {
9154            if (!query_data->second) {
9155                fail = true;
9156            }
9157        } else {
9158            auto global_query_data = dev_data->queryToStateMap.find(query);
9159            if (global_query_data != dev_data->queryToStateMap.end()) {
9160                if (!global_query_data->second) {
9161                    fail = true;
9162                }
9163            } else {
9164                fail = true;
9165            }
9166        }
9167        if (fail) {
9168            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9169                                 DRAWSTATE_INVALID_QUERY, "DS",
9170                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
9171                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
9172        }
9173    }
9174    return skip_call;
9175}
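// Illustrative note: the lookup above is two-tiered. Per-queue state
// (queue_data->second.queryToStateMap) wins; otherwise the device-wide map
// (dev_data->queryToStateMap) is consulted, and a query found in neither is treated as
// unavailable and reported. For example, a query ended on queue A but consumed by a
// copy submitted on queue B may fall through to the device-wide map.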
9176
9177VKAPI_ATTR void VKAPI_CALL
9178CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
9179                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
9180    bool skip_call = false;
9181    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9182    std::unique_lock<std::mutex> lock(global_lock);
9183
9184    auto cb_node = getCBNode(dev_data, commandBuffer);
9185    auto dst_buff_node = getBufferNode(dev_data, dstBuffer);
9186    if (cb_node && dst_buff_node) {
9187        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_node, "vkCmdCopyQueryPoolResults()");
9188        // Update bindings between buffer and cmd buffer
9189        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_node);
9190        // Validate that DST buffer has correct usage flags set
9191        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_node, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
9192                                              "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
9193        std::function<bool()> function = [=]() {
9194            SetBufferMemoryValid(dev_data, dst_buff_node, true);
9195            return false;
9196        };
9197        cb_node->validate_functions.push_back(function);
9198        std::function<bool(VkQueue)> queryUpdate =
9199            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
9200        cb_node->queryUpdates.push_back(queryUpdate);
9201        if (cb_node->state == CB_RECORDING) {
9202            skip_call |= addCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
9203        } else {
9204            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
9205        }
9206        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()");
9207        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
9208                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
9209    } else {
9210        assert(0);
9211    }
9212    lock.unlock();
9213    if (!skip_call)
9214        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
9215                                                         stride, flags);
9216}
9217
9218VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
9219                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
9220                                            const void *pValues) {
9221    bool skip_call = false;
9222    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9223    std::unique_lock<std::mutex> lock(global_lock);
9224    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9225    if (pCB) {
9226        if (pCB->state == CB_RECORDING) {
9227            skip_call |= addCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
9228        } else {
9229            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
9230        }
9231    }
9232    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
9233    if (0 == stageFlags) {
9234        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9235                             DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() call has no stageFlags set.");
9236    }
9237
9238    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
9239    auto pipeline_layout = getPipelineLayout(dev_data, layout);
9240    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
9241    // contained in the pipeline ranges.
9242    // Build a {start, end} span list for ranges with matching stage flags.
9243    const auto &ranges = pipeline_layout->push_constant_ranges;
9244    struct span {
9245        uint32_t start;
9246        uint32_t end;
9247    };
9248    std::vector<span> spans;
9249    spans.reserve(ranges.size());
9250    for (const auto &iter : ranges) {
9251        if (iter.stageFlags == stageFlags) {
9252            spans.push_back({iter.offset, iter.offset + iter.size});
9253        }
9254    }
9255    if (spans.size() == 0) {
9256        // There were no ranges that matched the stageFlags.
9257        skip_call |=
9258            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9259                    DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
9260                                                          "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ".",
9261                    (uint32_t)stageFlags, (uint64_t)layout);
9262    } else {
9263        // Sort span list by start value.
9264        struct comparer {
9265            bool operator()(struct span i, struct span j) { return i.start < j.start; }
9266        } my_comparer;
9267        std::sort(spans.begin(), spans.end(), my_comparer);
9268
9269        // Examine two spans at a time.
9270        std::vector<span>::iterator current = spans.begin();
9271        std::vector<span>::iterator next = current + 1;
9272        while (next != spans.end()) {
9273            if (current->end < next->start) {
9274                // There is a gap; cannot coalesce. Move to the next two spans.
9275                ++current;
9276                ++next;
9277            } else {
9278                // Coalesce the two spans.  The start of the next span
9279                // is within the current span, so pick the larger of
9280                // the end values to extend the current span.
9281                // Then delete the next span and set next to the span after it.
9282                current->end = max(current->end, next->end);
9283                next = spans.erase(next);
9284            }
9285        }
9286
9287        // Now we can check if the incoming range is within any of the spans.
9288        bool contained_in_a_range = false;
9289        for (uint32_t i = 0; i < spans.size(); ++i) {
9290            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
9291                contained_in_a_range = true;
9292                break;
9293            }
9294        }
9295        if (!contained_in_a_range) {
9296            skip_call |=
9297                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9298                        DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
9299                                                              "with stageFlags = 0x%" PRIx32 " "
9300                                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ".",
9301                        offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout);
9302        }
9303    }
9304    lock.unlock();
9305    if (!skip_call)
9306        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
9307}
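// Worked example for the span logic above (assumed layout values): push-constant ranges
// {offset = 0, size = 16} and {offset = 8, size = 24}, both with
// stageFlags == VK_SHADER_STAGE_VERTEX_BIT, become spans [0, 16) and [8, 32). They
// overlap, so they coalesce into the single span [0, 32). An update with offset = 4,
// size = 20 ([4, 24)) is then contained and passes; offset = 24, size = 16 ([24, 40))
// is not, and is reported.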
9308
9309VKAPI_ATTR void VKAPI_CALL
9310CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
9311    bool skip_call = false;
9312    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
9313    std::unique_lock<std::mutex> lock(global_lock);
9314    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
9315    if (pCB) {
9316        QueryObject query = {queryPool, slot};
9317        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
9318        pCB->queryUpdates.push_back(queryUpdate);
9319        if (pCB->state == CB_RECORDING) {
9320            skip_call |= addCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
9321        } else {
9322            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
9323        }
9324    }
9325    lock.unlock();
9326    if (!skip_call)
9327        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
9328}
9329
9330static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
9331                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag) {
9332    bool skip_call = false;
9333
9334    for (uint32_t attach = 0; attach < count; attach++) {
9335        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
9336            // Attachment counts are verified elsewhere, but prevent an invalid access
9337            if (attachments[attach].attachment < fbci->attachmentCount) {
9338                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
9339                auto view_state = getImageViewState(dev_data, *image_view);
9340                if (view_state) {
9341                    auto image_node = getImageNode(dev_data, view_state->create_info.image);
9342                    if (image_node != nullptr) {
9343                        if ((image_node->createInfo.usage & usage_flag) == 0) {
9344                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
9345                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_USAGE, "DS",
9346                                                 "vkCreateFramebuffer:  Framebuffer Attachment (%d) conflicts with the image's "
9347                                                 "IMAGE_USAGE flags (%s).",
9348                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag));
9349                        }
9350                    }
9351                }
9352            }
9353        }
9354    }
9355    return skip_call;
9356}
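// Illustrative example (assumed creation parameters): an image created with
//
//     VkImageCreateInfo ici = {};
//     ici.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
//
// and later referenced by a subpass as a color attachment fails
// MatchUsage(..., VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT), because
// (ici.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0.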
9357
9358// Validate VkFramebufferCreateInfo which includes:
9359// 1. attachmentCount equals renderPass attachmentCount
9360// 2. corresponding framebuffer and renderpass attachments have matching formats
9361// 3. corresponding framebuffer and renderpass attachments have matching sample counts
9362// 4. fb attachments only have a single mip level
9363// 5. fb attachment dimensions are each at least as large as the fb
9364// 6. fb attachments use identity swizzle
9365// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
9366// 8. fb dimensions are within physical device limits
9367static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9368    bool skip_call = false;
9369
9370    auto rp_node = getRenderPass(dev_data, pCreateInfo->renderPass);
9371    if (rp_node) {
9372        const VkRenderPassCreateInfo *rpci = rp_node->createInfo.ptr();
9373        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
9374            skip_call |= log_msg(
9375                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9376                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
9377                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
9378                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer.",
9379                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9380        } else {
9381            // attachmentCounts match, so make sure corresponding attachment details line up
9382            const VkImageView *image_views = pCreateInfo->pAttachments;
9383            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9384                auto view_state = getImageViewState(dev_data, image_views[i]);
9385                auto &ivci = view_state->create_info;
9386                if (ivci.format != rpci->pAttachments[i].format) {
9387                    skip_call |= log_msg(
9388                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9389                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9390                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
9391                              "the format of "
9392                              "%s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9393                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
9394                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9395                }
9396                const VkImageCreateInfo *ici = &getImageNode(dev_data, ivci.image)->createInfo;
9397                if (ici->samples != rpci->pAttachments[i].samples) {
9398                    skip_call |= log_msg(
9399                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9400                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE,
9401                        "DS", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
9402                              "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 ").",
9403                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
9404                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9405                }
9406                // Verify that view only has a single mip level
9407                if (ivci.subresourceRange.levelCount != 1) {
9408                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
9409                                         __LINE__, DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9410                                         "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
9411                                         "but only a single mip level (levelCount ==  1) is allowed when creating a Framebuffer.",
9412                                         "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.",
9413                }
9414                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
9415                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
9416                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
9417                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
9418                    (mip_height < pCreateInfo->height)) {
9419                    skip_call |=
9420                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9421                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9422                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions smaller "
9423                                "than the corresponding "
9424                                "framebuffer dimensions. Attachment dimensions must be at least as large. Here are the respective "
9425                                "dimensions for "
9426                                "attachment #%u, framebuffer:\n"
9427                                "width: %u, %u\n"
9428                                "height: %u, %u\n"
9429                                "layerCount: %u, %u\n",
9430                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
9431                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
9432                }
9433                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
9434                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
9435                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
9436                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
9437                    skip_call |= log_msg(
9438                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9439                        DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9440                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All framebuffer "
9441                        "attachments must have been created with the identity swizzle. Here are the actual swizzle values:\n"
9442                        "r swizzle = %s\n"
9443                        "g swizzle = %s\n"
9444                        "b swizzle = %s\n"
9445                        "a swizzle = %s\n",
9446                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
9447                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a));
9448                }
9449            }
9450        }
9451        // Verify correct attachment usage flags
9452        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
9453            // Verify input attachments:
9454            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount,
9455                                    rpci->pSubpasses[subpass].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
9456            // Verify color attachments:
9457            skip_call |= MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount,
9458                                    rpci->pSubpasses[subpass].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
9459            // Verify depth/stencil attachments:
9460            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
9461                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
9462                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
9463            }
9464        }
9465    } else {
9466        skip_call |=
9467            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
9468                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9469                    "vkCreateFramebuffer(): Attempt to create framebuffer with invalid renderPass (0x%" PRIxLEAST64 ").",
9470                    reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass));
9471    }
9472    // Verify FB dimensions are within physical device limits
9473    if ((pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) ||
9474        (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) ||
9475        (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers)) {
9476        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
9477                             DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
9478                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo dimensions exceed physical device limits. "
9479                             "Here are the respective dimensions: requested, device max:\n"
9480                             "width: %u, %u\n"
9481                             "height: %u, %u\n"
9482                             "layerCount: %u, %u\n",
9483                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
9484                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
9485                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers);
9486    }
9487    return skip_call;
9488}
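// Worked example for check #5 above (assumed values): a 256x256 image viewed with
// baseMipLevel = 2 yields mip_width = mip_height = max(1u, 256 >> 2) = 64, so a
// VkFramebufferCreateInfo requesting a width or height of 128 against that view is
// reported, while 64x64 or smaller passes the dimension check.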
9489
9490// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
9491//  Return true if an error is encountered and callback returns true to skip call down chain
9492//   false indicates that call down chain should proceed
9493static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
9494    // TODO : Verify that the renderPass the FB is created with is compatible with the FB
9495    bool skip_call = false;
9496    skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
9497    return skip_call;
9498}
9499
9500// CreateFramebuffer state has been validated and call down chain completed so record new framebuffer object
9501static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
9502    // Shadow create info and store in map
9503    std::unique_ptr<FRAMEBUFFER_NODE> fb_node(
9504        new FRAMEBUFFER_NODE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));
9505
9506    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9507        VkImageView view = pCreateInfo->pAttachments[i];
9508        auto view_state = getImageViewState(dev_data, view);
9509        if (!view_state) {
9510            continue;
9511        }
9512        MT_FB_ATTACHMENT_INFO fb_info;
9513        fb_info.mem = getImageNode(dev_data, view_state->create_info.image)->mem;
9514        fb_info.view_state = view_state;
9515        fb_info.image = view_state->create_info.image;
9516        fb_node->attachments.push_back(fb_info);
9517    }
9518    dev_data->frameBufferMap[fb] = std::move(fb_node);
9519}
9520
9521VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
9522                                                 const VkAllocationCallbacks *pAllocator,
9523                                                 VkFramebuffer *pFramebuffer) {
9524    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9525    std::unique_lock<std::mutex> lock(global_lock);
9526    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
9527    lock.unlock();
9528
9529    if (skip_call)
9530        return VK_ERROR_VALIDATION_FAILED_EXT;
9531
9532    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);
9533
9534    if (VK_SUCCESS == result) {
9535        lock.lock();
9536        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
9537        lock.unlock();
9538    }
9539    return result;
9540}
9541
9542static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
9543                           std::unordered_set<uint32_t> &processed_nodes) {
9544    // If we have already checked this node we have not found a dependency path so return false.
9545    if (processed_nodes.count(index))
9546        return false;
9547    processed_nodes.insert(index);
9548    const DAGNode &node = subpass_to_node[index];
9549    // Look for a dependency path. If one exists, return true; otherwise recurse over the previous nodes.
9550    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
9551        for (auto elem : node.prev) {
9552            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
9553                return true;
9554        }
9555    } else {
9556        return true;
9557    }
9558    return false;
9559}
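// Illustrative example (assumed graph): with dependency edges 0 -> 1 -> 2,
// subpass_to_node[2].prev == {1} and subpass_to_node[1].prev == {0}.
// FindDependency(2, 0, ...) misses 0 in node 2's prev list, recurses into node 1,
// finds 0 there, and returns true; processed_nodes prevents revisiting shared
// ancestors on diamond-shaped graphs.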
9560
9561static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
9562                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
9563    bool result = true;
9564    // Loop through all subpasses that share the same attachment and make sure a dependency exists
9565    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
9566        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
9567            continue;
9568        const DAGNode &node = subpass_to_node[subpass];
9569        // Check for a specified dependency between the two nodes. If one exists we are done.
9570        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
9571        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
9572        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
9573            // If no explicit dependency exists, an implicit one still might; if neither does, log an error.
9574            std::unordered_set<uint32_t> processed_nodes;
9575            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
9576                FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
9577                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9578                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
9579                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
9580                                     dependent_subpasses[k]);
9581                result = false;
9582            }
9583        }
9584    }
9585    return result;
9586}
9587
9588static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
9589                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
9590    const DAGNode &node = subpass_to_node[index];
9591    // If this node writes to the attachment return true as next nodes need to preserve the attachment.
9592    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9593    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9594        if (attachment == subpass.pColorAttachments[j].attachment)
9595            return true;
9596    }
9597    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9598        if (attachment == subpass.pDepthStencilAttachment->attachment)
9599            return true;
9600    }
9601    bool result = false;
9602    // Loop through previous nodes and see if any of them write to the attachment.
9603    for (auto elem : node.prev) {
9604        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
9605    }
9606    // If the attachment was written to by a previous node, then this node needs to preserve it.
9607    if (result && depth > 0) {
9608        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
9609        bool has_preserved = false;
9610        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9611            if (subpass.pPreserveAttachments[j] == attachment) {
9612                has_preserved = true;
9613                break;
9614            }
9615        }
9616        if (!has_preserved) {
9617            skip_call |=
9618                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9619                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9620                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
9621        }
9622    }
9623    return result;
9624}
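// Illustrative example (assumed render pass): with DAG edges 0 -> 1 -> 2, if subpass 0
// writes attachment 3 as a color output and subpass 2 reads it as an input attachment,
// the recursion reaches the write in subpass 0 through subpass 1. Because the write
// happened before subpass 1 (depth > 0) and subpass 1 neither writes attachment 3 nor
// lists it in pPreserveAttachments, the "must be preserved" error fires for subpass 1.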
9625
9626template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
9627    // Two half-open ranges [offset, offset + size) overlap iff each begins before the other ends.
9628    return (offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1));
9629}
9630
9631bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
9632    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
9633            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
9634}
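// Worked example (assumed values): ranges [0, 8) and [2, 4) overlap (2 < 8 and 0 < 4),
// as do identical ranges; [0, 4) and [4, 8) share only a boundary and do not. For two
// views of one image, e.g. mips [0, 2) x layers [0, 1) vs. mips [1, 2) x layers [0, 1),
// both the mip and layer spans intersect, so isRegionOverlapping reports aliasing.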
9635
9636static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_NODE const *framebuffer,
9637                                 RENDER_PASS_NODE const *renderPass) {
9638    bool skip_call = false;
9639    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
9640    auto const pCreateInfo = renderPass->createInfo.ptr();
9641    auto const & subpass_to_node = renderPass->subpassToNode;
9642    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
9643    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
9644    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
9645    // Find overlapping attachments
9646    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
9647        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
9648            VkImageView viewi = pFramebufferInfo->pAttachments[i];
9649            VkImageView viewj = pFramebufferInfo->pAttachments[j];
9650            if (viewi == viewj) {
9651                overlapping_attachments[i].push_back(j);
9652                overlapping_attachments[j].push_back(i);
9653                continue;
9654            }
9655            auto view_state_i = getImageViewState(dev_data, viewi);
9656            auto view_state_j = getImageViewState(dev_data, viewj);
9657            if (!view_state_i || !view_state_j) {
9658                continue;
9659            }
9660            auto view_ci_i = view_state_i->create_info;
9661            auto view_ci_j = view_state_j->create_info;
9662            if (view_ci_i.image == view_ci_j.image && isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
9663                overlapping_attachments[i].push_back(j);
9664                overlapping_attachments[j].push_back(i);
9665                continue;
9666            }
9667            auto image_data_i = getImageNode(dev_data, view_ci_i.image);
9668            auto image_data_j = getImageNode(dev_data, view_ci_j.image);
9669            if (!image_data_i || !image_data_j) {
9670                continue;
9671            }
9672            if (image_data_i->mem == image_data_j->mem && isRangeOverlapping(image_data_i->memOffset, image_data_i->memSize,
9673                                                                             image_data_j->memOffset, image_data_j->memSize)) {
9674                overlapping_attachments[i].push_back(j);
9675                overlapping_attachments[j].push_back(i);
9676            }
9677        }
9678    }
9679    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
9680        uint32_t attachment = i;
9681        for (auto other_attachment : overlapping_attachments[i]) {
9682            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9683                skip_call |=
9684                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9685                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9686                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9687                            attachment, other_attachment);
9688            }
9689            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
9690                skip_call |=
9691                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9692                            DRAWSTATE_INVALID_RENDERPASS, "DS", "Attachment %d aliases attachment %d but doesn't "
9693                                                                "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT.",
9694                            other_attachment, attachment);
9695            }
9696        }
9697    }
9698    // For each attachment, find the subpasses that use it.
9699    unordered_set<uint32_t> attachmentIndices;
9700    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9701        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9702        attachmentIndices.clear();
9703        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9704            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9705            if (attachment == VK_ATTACHMENT_UNUSED)
9706                continue;
9707            input_attachment_to_subpass[attachment].push_back(i);
9708            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9709                input_attachment_to_subpass[overlapping_attachment].push_back(i);
9710            }
9711        }
9712        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9713            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9714            if (attachment == VK_ATTACHMENT_UNUSED)
9715                continue;
9716            output_attachment_to_subpass[attachment].push_back(i);
9717            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9718                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9719            }
9720            attachmentIndices.insert(attachment);
9721        }
9722        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9723            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
9724            output_attachment_to_subpass[attachment].push_back(i);
9725            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
9726                output_attachment_to_subpass[overlapping_attachment].push_back(i);
9727            }
9728
9729            if (attachmentIndices.count(attachment)) {
9730                skip_call |=
9731                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9732                            DRAWSTATE_INVALID_RENDERPASS, "DS",
9733                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
9734            }
9735        }
9736    }
9737    // If a dependency is needed, make sure one exists
9738    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9739        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9740        // If the attachment is an input, then all subpasses that write to it must have a dependency relationship
9741        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9742            uint32_t attachment = subpass.pInputAttachments[j].attachment;
9743            if (attachment == VK_ATTACHMENT_UNUSED)
9744                continue;
9745            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9746        }
9747        // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship
9748        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9749            uint32_t attachment = subpass.pColorAttachments[j].attachment;
9750            if (attachment == VK_ATTACHMENT_UNUSED)
9751                continue;
9752            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9753            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9754        }
9755        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9756            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
9757            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9758            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
9759        }
9760    }
9761    // Loop through implicit dependencies: if this pass reads an attachment, make sure it is preserved in every pass between
9762    // the write and this read.
9763    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9764        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9765        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9766            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
9767        }
9768    }
9769    return skip_call;
9770}
9771// ValidateLayoutVsAttachmentDescription is a general function for validating state associated with the
9772// VkAttachmentDescription structs used by the subpasses of a renderpass. The initial check makes sure that
9773// attachments whose first layout is READ_ONLY don't have CLEAR as their loadOp.
9774static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
9775                                                  const uint32_t attachment,
9776                                                  const VkAttachmentDescription &attachment_description) {
9777    bool skip_call = false;
9778    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
9779    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
9780        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
9781            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
9782            skip_call |=
9783                log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
9784                        0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9785                        "Cannot clear attachment %d with invalid first layout %s.", attachment, string_VkImageLayout(first_layout));
9786        }
9787    }
9788    return skip_call;
9789}
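// Illustrative example (assumed description): an attachment with
//
//     VkAttachmentDescription desc = {};
//     desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//
// whose first-use layout is VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL is reported here,
// since a read-only first layout cannot receive the clear that the loadOp requests.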
9790
9791static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
9792    bool skip = false;
9793
9794    // Track when we're observing the first use of an attachment
9795    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
9796    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9797        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9798        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
9799            auto attach_index = subpass.pColorAttachments[j].attachment;
9800            if (attach_index == VK_ATTACHMENT_UNUSED)
9801                continue;
9802
9803            switch (subpass.pColorAttachments[j].layout) {
9804            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
9805                /* This is ideal. */
9806                break;
9807
9808            case VK_IMAGE_LAYOUT_GENERAL:
9809                /* May not be optimal; TODO: reconsider this warning based on
9810                 * other constraints?
9811                 */
9812                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9813                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9814                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
9815                break;
9816
9817            default:
9818                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9819                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9820                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
9821                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
9822            }
9823
9824            if (attach_first_use[attach_index]) {
9825                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
9826                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9827            }
9828            attach_first_use[attach_index] = false;
9829        }
9830        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
9831            switch (subpass.pDepthStencilAttachment->layout) {
9832            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
9833            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9834                /* These are ideal. */
9835                break;
9836
9837            case VK_IMAGE_LAYOUT_GENERAL:
9838                /* May not be optimal; TODO: reconsider this warning based on
9839                 * other constraints? GENERAL can be better than doing a bunch
9840                 * of transitions.
9841                 */
9842                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9843                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9844                                "GENERAL layout for depth attachment may not give optimal performance.");
9845                break;
9846
9847            default:
9848                /* No other layouts are acceptable */
9849                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
9850                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9851                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
9852                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
9853                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
9854            }
9855
9856            auto attach_index = subpass.pDepthStencilAttachment->attachment;
9857            if (attach_first_use[attach_index]) {
9858                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
9859                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9860            }
9861            attach_first_use[attach_index] = false;
9862        }
9863        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
9864            auto attach_index = subpass.pInputAttachments[j].attachment;
9865            if (attach_index == VK_ATTACHMENT_UNUSED)
9866                continue;
9867
9868            switch (subpass.pInputAttachments[j].layout) {
9869            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
9870            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
9871                /* These are ideal. */
9872                break;
9873
9874            case VK_IMAGE_LAYOUT_GENERAL:
9875                /* May not be optimal. TODO: reconsider this warning based on
9876                 * other constraints.
9877                 */
9878                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
9879                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9880                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
9881                break;
9882
9883            default:
9884                /* No other layouts are acceptable */
9885                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9886                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
9887                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
9888                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
9889            }
9890
9891            if (attach_first_use[attach_index]) {
9892                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
9893                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
9894            }
9895            attach_first_use[attach_index] = false;
9896        }
9897    }
9898    return skip;
9899}
9900
9901static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
9902                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
9903    bool skip_call = false;
9904    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9905        DAGNode &subpass_node = subpass_to_node[i];
9906        subpass_node.pass = i;
9907    }
9908    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
9909        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
9910        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
9911            if (dependency.srcSubpass == dependency.dstSubpass) {
9912                skip_call |=
9913                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9914                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
9915            }
9916
9917            // We don't want to add edges to the DAG for dependencies to/from
9918            // VK_SUBPASS_EXTERNAL. We don't use them for anything, and their
9919            // presence complicates other code.
9920            continue;
9921        } else if (dependency.srcSubpass > dependency.dstSubpass) {
9922            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9923                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
9924                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
9925        } else if (dependency.srcSubpass == dependency.dstSubpass) {
9926            has_self_dependency[dependency.srcSubpass] = true;
9927        }
9928
9929        subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
9930        subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
9931    }
9932    return skip_call;
9933}
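// Illustrative example (assumed dependencies): pDependencies entries {src 0, dst 1} and
// {src 1, dst 2} give node 1 prev == {0} and next == {2}. An entry with
// srcSubpass == VK_SUBPASS_EXTERNAL adds no edge, and {src 1, dst 1} only sets
// has_self_dependency[1].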
9934
9935
9936VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
9937                                                  const VkAllocationCallbacks *pAllocator,
9938                                                  VkShaderModule *pShaderModule) {
9939    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
9940    bool skip_call = false;
9941
9942    /* Use the SPIRV-Tools validator to try to catch any issues with the module itself */
9943    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
9944    spv_const_binary_t binary { pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t) };
9945    spv_diagnostic diag = nullptr;
9946
9947    auto result = spvValidate(ctx, &binary, &diag);
9948    if (result != SPV_SUCCESS) {
9949        skip_call |=
9950            log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
9951                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
9952                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
9953    }
9954
9955    spvDiagnosticDestroy(diag);
9956    spvContextDestroy(ctx);
9957
9958    if (skip_call)
9959        return VK_ERROR_VALIDATION_FAILED_EXT;
9960
9961    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);
9962
9963    if (res == VK_SUCCESS) {
9964        std::lock_guard<std::mutex> lock(global_lock);
9965        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
9966    }
9967    return res;
9968}
9969
9970static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
9971    bool skip_call = false;
9972    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
9973        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9974                             DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
9975                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d.",
9976                             type, attachment, attachment_count);
9977    }
9978    return skip_call;
9979}
9980
9981static bool IsPowerOfTwo(unsigned x) {
9982    return x && !(x & (x-1));
9983}
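// Illustrative example: VkSampleCountFlagBits values are single bits, so a subpass with
// a VK_SAMPLE_COUNT_4_BIT (0x4) color attachment and a VK_SAMPLE_COUNT_1_BIT (0x1)
// depth attachment accumulates sample_count == 0x5. IsPowerOfTwo(0x5) is false, which
// triggers the inconsistent-sample-count error in ValidateRenderpassAttachmentUsage
// below; a consistent subpass leaves exactly one bit set.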
9984
9985static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
9986    bool skip_call = false;
9987    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
9988        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
9989        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
9990            skip_call |=
9991                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
9992                        DRAWSTATE_INVALID_RENDERPASS, "DS",
9993                        "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS.", i);
9994        }
9995        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
9996            uint32_t attachment = subpass.pPreserveAttachments[j];
9997            if (attachment == VK_ATTACHMENT_UNUSED) {
9998                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
9999                                     __LINE__, DRAWSTATE_INVALID_ATTACHMENT_INDEX, "DS",
10000                                     "CreateRenderPass:  Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", j);
10001            } else {
10002                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
10003            }
10004        }
10005
10006        auto subpass_performs_resolve = subpass.pResolveAttachments && std::any_of(
10007            subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
10008            [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });
10009
10010        unsigned sample_count = 0;
10011
10012        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10013            uint32_t attachment;
10014            if (subpass.pResolveAttachments) {
10015                attachment = subpass.pResolveAttachments[j].attachment;
10016                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");
10017
10018                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
10019                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
10020                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10021                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10022                                         "CreateRenderPass:  Subpass %u requests multisample resolve into attachment %u, "
10023                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s",
10024                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples));
10025                }
10026            }
10027            attachment = subpass.pColorAttachments[j].attachment;
10028            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");
10029
10030            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10031                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10032
10033                if (subpass_performs_resolve &&
10034                    pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
10035                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10036                                         __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10037                                         "CreateRenderPass:  Subpass %u requests multisample resolve from attachment %u "
10038                                         "which has VK_SAMPLE_COUNT_1_BIT",
10039                                         i, attachment);
10040                }
10041            }
10042        }
10043
10044        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10045            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10046            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");
10047
10048            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
10049                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
10050            }
10051        }
10052
10053        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10054            uint32_t attachment = subpass.pInputAttachments[j].attachment;
10055            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
10056        }
10057
10058        if (sample_count && !IsPowerOfTwo(sample_count)) {
10059            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
10060                                 __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
10061                                 "CreateRenderPass:  Subpass %u attempts to render to "
10062                                 "attachments with inconsistent sample counts",
10063                                 i);
10064        }
10065    }
10066    return skip_call;
10067}
10068
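// A minimal sketch (illustrative, not part of this layer) of a subpass that the checks above
// reject: it requests a resolve from a single-sample color attachment. All names below are
// hypothetical.
//
//     VkAttachmentReference color = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkAttachmentReference resolve = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
//     VkSubpassDescription subpass = {};
//     subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
//     subpass.colorAttachmentCount = 1;
//     subpass.pColorAttachments = &color;     // pAttachments[0].samples == VK_SAMPLE_COUNT_1_BIT
//     subpass.pResolveAttachments = &resolve; // error: resolving from a single-sample source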
10069VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
10070                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
10071    bool skip_call = false;
10072    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10073
10074    std::unique_lock<std::mutex> lock(global_lock);
10075
10076    skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
10077    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
10078    //       ValidateLayouts.
10079    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
10080    lock.unlock();
10081
10082    if (skip_call) {
10083        return VK_ERROR_VALIDATION_FAILED_EXT;
10084    }
10085
10086    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
10087
10088    if (VK_SUCCESS == result) {
10089        lock.lock();
10090
10091        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
10092        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
10093        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);
10094
10095        auto render_pass = unique_ptr<RENDER_PASS_NODE>(new RENDER_PASS_NODE(pCreateInfo));
10096        render_pass->renderPass = *pRenderPass;
10097        render_pass->hasSelfDependency = has_self_dependency;
10098        render_pass->subpassToNode = subpass_to_node;
10099
10100        // TODO: Maybe fill list and then copy instead of locking
10101        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
10102        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
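        // Record whether each attachment's first use in the pass is a read (input attachment)
        // or a write (color or depth/stencil), along with the layout of that first use.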
10103        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
10104            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
10105            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10106                uint32_t attachment = subpass.pColorAttachments[j].attachment;
10107                if (!attachment_first_read.count(attachment)) {
10108                    attachment_first_read.insert(std::make_pair(attachment, false));
10109                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
10110                }
10111            }
10112            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
10113                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
10114                if (!attachment_first_read.count(attachment)) {
10115                    attachment_first_read.insert(std::make_pair(attachment, false));
10116                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
10117                }
10118            }
10119            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10120                uint32_t attachment = subpass.pInputAttachments[j].attachment;
10121                if (!attachment_first_read.count(attachment)) {
10122                    attachment_first_read.insert(std::make_pair(attachment, true));
10123                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
10124                }
10125            }
10126        }
10127
10128        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
10129    }
10130    return result;
10131}
10132
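// Check that each framebuffer attachment's render-pass initialLayout either matches the last
// layout recorded for that subresource in this command buffer or is VK_IMAGE_LAYOUT_UNDEFINED;
// subresources with no recorded layout are seeded with the initial layout.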
10133static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10134    bool skip_call = false;
10135    auto const pRenderPassInfo = getRenderPass(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
10136    auto const & framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
10137    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
10138        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10139                             DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot start a render pass using a framebuffer "
10140                                                                 "with a different number of attachments.");
10141    }
10142    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10143        const VkImageView &image_view = framebufferInfo.pAttachments[i];
10144        auto view_state = getImageViewState(dev_data, image_view);
10145        assert(view_state);
10146        const VkImage &image = view_state->create_info.image;
10147        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
10148        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
10149                                             pRenderPassInfo->pAttachments[i].initialLayout};
10150        // TODO: Do not iterate over every possibility - consolidate where possible
10151        for (uint32_t j = 0; j < subRange.levelCount; j++) {
10152            uint32_t level = subRange.baseMipLevel + j;
10153            for (uint32_t k = 0; k < subRange.layerCount; k++) {
10154                uint32_t layer = subRange.baseArrayLayer + k;
10155                VkImageSubresource sub = {subRange.aspectMask, level, layer};
10156                IMAGE_CMD_BUF_LAYOUT_NODE node;
10157                if (!FindLayout(pCB, image, sub, node)) {
10158                    SetLayout(pCB, image, sub, newNode);
10159                    continue;
10160                }
10161                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED &&
10162                    newNode.layout != node.layout) {
10163                    skip_call |=
10164                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10165                                DRAWSTATE_INVALID_RENDERPASS, "DS",
10166                                "You cannot start a render pass using attachment %u "
10167                                "where the render pass initial layout is %s and the previous "
10168                                "known layout of the attachment is %s. The layouts must match, or "
10169                                "the render pass initial layout for the attachment must be "
10170                                "VK_IMAGE_LAYOUT_UNDEFINED",
10171                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
10172                }
10173            }
10174        }
10175    }
10176    return skip_call;
10177}
10178
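// Record the layout that an attachment reference implicitly transitions to when its subpass begins.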
10179static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
10180                                          FRAMEBUFFER_NODE *pFramebuffer,
10181                                          VkAttachmentReference ref)
10182{
10183    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
10184        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
10185        SetLayout(dev_data, pCB, image_view, ref.layout);
10186    }
10187}
10188
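// Apply the implicit layout transitions for every attachment referenced by the given subpass.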
10189static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
10190                                     const int subpass_index) {
10191    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10192    if (!renderPass)
10193        return;
10194
10195    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10196    if (!framebuffer)
10197        return;
10198
10199    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
10200    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
10201        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
10202    }
10203    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
10204        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
10205    }
10206    if (subpass.pDepthStencilAttachment) {
10207        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
10208    }
10209}
10210
10211static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name) {
10212    bool skip_call = false;
10213    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
10214        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10215                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS", "Cannot execute command %s on a secondary command buffer.",
10216                             cmd_name.c_str());
10217    }
10218    return skip_call;
10219}
10220
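// When a render pass ends, every attachment implicitly transitions to its finalLayout.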
10221static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin) {
10222    auto renderPass = getRenderPass(dev_data, pRenderPassBegin->renderPass);
10223    if (!renderPass)
10224        return;
10225
10226    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
10227    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
10228    if (!framebuffer)
10229        return;
10230
10231    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
10232        auto image_view = framebuffer->createInfo.pAttachments[i];
10233        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
10234    }
10235}
10236
10237static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
10238    bool skip_call = false;
    // getFramebuffer() can return null for an invalid handle; guard to avoid dereferencing null.
    auto framebuffer = getFramebuffer(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return skip_call;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo = &framebuffer->createInfo;
10240    if (pRenderPassBegin->renderArea.offset.x < 0 ||
10241        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
10242        pRenderPassBegin->renderArea.offset.y < 0 ||
10243        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
10244        skip_call |= static_cast<bool>(log_msg(
10245            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10246            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
10248            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
10249            "height %d.",
10250            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
10251            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
10252    }
10253    return skip_call;
10254}
10255
// Determine which load/store op applies to the given format: stencil-only formats consult the stencil[Load|Store]Op, depth/color
// formats consult the [load|store]Op, and combined depth/stencil formats consult both.
10258// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
10259template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
10260    if (color_depth_op != op && stencil_op != op) {
10261        return false;
10262    }
10263    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
10264    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;
10265
10266    return (((check_color_depth_load_op == true) && (color_depth_op == op)) ||
10267            ((check_stencil_load_op == true) && (stencil_op == op)));
10268}
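// Worked example (illustrative): for VK_FORMAT_D24_UNORM_S8_UINT with color_depth_op == LOAD
// and stencil_op == CLEAR, querying op == CLEAR returns true because the stencil aspect
// matches; for a stencil-only format such as VK_FORMAT_S8_UINT, color_depth_op is ignored
// and only stencil_op is consulted.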
10269
10270VKAPI_ATTR void VKAPI_CALL
10271CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
10272    bool skip_call = false;
10273    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10274    std::unique_lock<std::mutex> lock(global_lock);
10275    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
10276    auto renderPass = pRenderPassBegin ? getRenderPass(dev_data, pRenderPassBegin->renderPass) : nullptr;
10277    auto framebuffer = pRenderPassBegin ? getFramebuffer(dev_data, pRenderPassBegin->framebuffer) : nullptr;
10278    if (cb_node) {
10279        if (renderPass) {
10280            uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
10281            cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
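            // Queue deferred image-memory validity work per attachment based on its load op:
            // CLEAR marks memory valid, DONT_CARE marks it invalid, and LOAD requires the
            // memory to already be valid when the command buffer is submitted.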
10282            for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
10283                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10284                auto pAttachment = &renderPass->createInfo.pAttachments[i];
10285                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10286                                                         pAttachment->stencilLoadOp,
10287                                                         VK_ATTACHMENT_LOAD_OP_CLEAR)) {
10288                    clear_op_size = static_cast<uint32_t>(i) + 1;
10289                    std::function<bool()> function = [=]() {
10290                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10291                        return false;
10292                    };
10293                    cb_node->validate_functions.push_back(function);
10294                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10295                                                                pAttachment->stencilLoadOp,
10296                                                                VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
10297                    std::function<bool()> function = [=]() {
10298                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10299                        return false;
10300                    };
10301                    cb_node->validate_functions.push_back(function);
10302                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10303                                                                pAttachment->stencilLoadOp,
10304                                                                VK_ATTACHMENT_LOAD_OP_LOAD)) {
10305                    std::function<bool()> function = [=]() {
10306                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10307                                                          "vkCmdBeginRenderPass()");
10308                    };
10309                    cb_node->validate_functions.push_back(function);
10310                }
10311                if (renderPass->attachment_first_read[i]) {
10312                    std::function<bool()> function = [=]() {
10313                        return ValidateImageMemoryIsValid(dev_data, getImageNode(dev_data, fb_info.image),
10314                                                          "vkCmdBeginRenderPass()");
10315                    };
10316                    cb_node->validate_functions.push_back(function);
10317                }
10318            }
10319            if (clear_op_size > pRenderPassBegin->clearValueCount) {
10320                skip_call |=
10321                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10322                            reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442, "DS",
                            "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there "
                            "must be at least %u entries in the pClearValues array to account for the highest-indexed attachment "
                            "in renderPass 0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR, which requires %u entries. Note "
                            "that the pClearValues array is indexed by attachment number, so even if some entries between 0 and "
                            "%u correspond to attachments that aren't cleared, they will be ignored. %s",
10329                            pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass),
10330                            clear_op_size, clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
10331            }
10332            skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
10333            skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
10334            skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass");
10335            skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
10336            skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass");
10337            skip_call |= addCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10338            cb_node->activeRenderPass = renderPass;
10339            // This is a shallow copy as that is all that is needed for now
10340            cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10341            cb_node->activeSubpass = 0;
10342            cb_node->activeSubpassContents = contents;
10343            cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
10344            // Connect this framebuffer and its children to this cmdBuffer
10345            AddFramebufferBinding(dev_data, cb_node, framebuffer);
10346            // transition attachments to the correct layouts for the first subpass
10347            TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
10348        } else {
10349            skip_call |=
10350                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10351                        DRAWSTATE_INVALID_RENDERPASS, "DS", "You cannot use a NULL RenderPass object in vkCmdBeginRenderPass()");
10352        }
10353    }
10354    lock.unlock();
10355    if (!skip_call) {
10356        dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
10357    }
10358}
10359
10360VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
10361    bool skip_call = false;
10362    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10363    std::unique_lock<std::mutex> lock(global_lock);
10364    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10365    if (pCB) {
10366        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass");
10367        skip_call |= addCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10368        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass");
10369
        // Guard against a null activeRenderPass: outsideRenderPass() above only records an
        // error, so dereferencing activeRenderPass unconditionally could crash the layer.
        if (pCB->activeRenderPass) {
            auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
            if (pCB->activeSubpass == subpassCount - 1) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
                            "vkCmdNextSubpass(): Attempted to advance beyond final subpass");
            }
        }
10377    }
10378    lock.unlock();
10379
10380    if (skip_call)
10381        return;
10382
10383    dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
10384
    if (pCB) {
        lock.lock();
        pCB->activeSubpass++;
        pCB->activeSubpassContents = contents;
        TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
    }
10391}
10392
10393VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
10394    bool skip_call = false;
10395    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10396    std::unique_lock<std::mutex> lock(global_lock);
10397    auto pCB = getCBNode(dev_data, commandBuffer);
10398    if (pCB) {
10399        RENDER_PASS_NODE* pRPNode = pCB->activeRenderPass;
10400        auto framebuffer = getFramebuffer(dev_data, pCB->activeFramebuffer);
10401        if (pRPNode) {
10402            if (pCB->activeSubpass != pRPNode->createInfo.subpassCount - 1) {
10403                skip_call |=
10404                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10405                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, DRAWSTATE_INVALID_SUBPASS_INDEX, "DS",
10406                            "vkCmdEndRenderPass(): Called before reaching final subpass");
10407            }
10408
10409            for (size_t i = 0; i < pRPNode->createInfo.attachmentCount; ++i) {
10410                MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
10411                auto pAttachment = &pRPNode->createInfo.pAttachments[i];
10412                if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
10413                                                         pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
10414                    std::function<bool()> function = [=]() {
10415                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), true);
10416                        return false;
10417                    };
10418                    pCB->validate_functions.push_back(function);
10419                } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
10420                                                                pAttachment->stencilStoreOp,
10421                                                                VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
10422                    std::function<bool()> function = [=]() {
10423                        SetImageMemoryValid(dev_data, getImageNode(dev_data, fb_info.image), false);
10424                        return false;
10425                    };
10426                    pCB->validate_functions.push_back(function);
10427                }
10428            }
10429        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass");
10431        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass");
10432        skip_call |= addCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
10433    }
10434    lock.unlock();
10435
10436    if (skip_call)
10437        return;
10438
10439    dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
10440
10441    if (pCB) {
10442        lock.lock();
10443        TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
10444        pCB->activeRenderPass = nullptr;
10445        pCB->activeSubpass = 0;
10446        pCB->activeFramebuffer = VK_NULL_HANDLE;
10447    }
10448}
10449
10450static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
10451                                        uint32_t secondaryAttach, const char *msg) {
10452    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10453                   DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10454                   "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
10455                   "that is not compatible with the Primary Cmd Buffer current render pass. "
                   "that is not compatible with the Primary Cmd Buffer's current render pass. "
10457                   reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg);
10458}
10459
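// Compare a single attachment reference from each pass under the compatibility rules:
// out-of-range indices are normalized to VK_ATTACHMENT_UNUSED, then format, sample count,
// and (for multi-subpass render passes) flags must all match.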
10460static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10461                                            VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
10462                                            VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
10463                                            uint32_t secondaryAttach, bool is_multi) {
10464    bool skip_call = false;
10465    if (primaryPassCI->attachmentCount <= primaryAttach) {
10466        primaryAttach = VK_ATTACHMENT_UNUSED;
10467    }
10468    if (secondaryPassCI->attachmentCount <= secondaryAttach) {
10469        secondaryAttach = VK_ATTACHMENT_UNUSED;
10470    }
10471    if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
10472        return skip_call;
10473    }
10474    if (primaryAttach == VK_ATTACHMENT_UNUSED) {
10475        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10476                                                 "The first is unused while the second is not.");
10477        return skip_call;
10478    }
10479    if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
10480        skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
10481                                                 "The second is unused while the first is not.");
10482        return skip_call;
10483    }
10484    if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
10485        skip_call |=
10486            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
10487    }
10488    if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
10489        skip_call |=
10490            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
10491    }
10492    if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
10493        skip_call |=
10494            logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
10495    }
10496    return skip_call;
10497}
10498
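// Compare the input, color, resolve, and depth/stencil references of one subpass from each
// pass, padding the shorter attachment arrays with VK_ATTACHMENT_UNUSED so that trailing
// entries are still checked.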
10499static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10500                                         VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10501                                         VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
10502    bool skip_call = false;
10503    const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
10504    const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
10505    uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
10506    for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
10507        uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
10508        if (i < primary_desc.inputAttachmentCount) {
10509            primary_input_attach = primary_desc.pInputAttachments[i].attachment;
10510        }
10511        if (i < secondary_desc.inputAttachmentCount) {
10512            secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
10513        }
10514        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
10515                                                     secondaryPassCI, secondary_input_attach, is_multi);
10516    }
10517    uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
10518    for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
10519        uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
10520        if (i < primary_desc.colorAttachmentCount) {
10521            primary_color_attach = primary_desc.pColorAttachments[i].attachment;
10522        }
10523        if (i < secondary_desc.colorAttachmentCount) {
10524            secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
10525        }
10526        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
10527                                                     secondaryPassCI, secondary_color_attach, is_multi);
10528        uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
10529        if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
10530            primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
10531        }
10532        if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
10533            secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
10534        }
10535        skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
10536                                                     secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
10537    }
10538    uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
10539    if (primary_desc.pDepthStencilAttachment) {
10540        primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
10541    }
10542    if (secondary_desc.pDepthStencilAttachment) {
10543        secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
10544    }
10545    skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
10546                                                 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
10547    return skip_call;
10548}
10549
10550// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
10551//  This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
10552//  will then feed into this function
10553static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
10554                                            VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
10555                                            VkRenderPassCreateInfo const *secondaryPassCI) {
10556    bool skip_call = false;
10557
10558    if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
10559        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10560                             DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10561                             "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10562                             " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
10563                             " that has a subpassCount of %u.",
10564                             reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
10565                             reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
10566    } else {
10567        for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
10568            skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
10569                                                      primaryPassCI->subpassCount > 1);
10570        }
10571    }
10572    return skip_call;
10573}
10574
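// A secondary command buffer that inherits a framebuffer must inherit the primary's active
// framebuffer, and that framebuffer's render pass must be compatible with the inherited one.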
10575static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
10576                                VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
10577    bool skip_call = false;
10578    if (!pSubCB->beginInfo.pInheritanceInfo) {
10579        return skip_call;
10580    }
10581    VkFramebuffer primary_fb = pCB->activeFramebuffer;
10582    VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
10583    if (secondary_fb != VK_NULL_HANDLE) {
10584        if (primary_fb != secondary_fb) {
10585            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10586                                 DRAWSTATE_FRAMEBUFFER_INCOMPATIBLE, "DS",
10587                                 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
10588                                 " which has a framebuffer 0x%" PRIx64
10589                                 " that is not the same as the primaryCB's current active framebuffer 0x%" PRIx64 ".",
10590                                 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
10591                                 reinterpret_cast<uint64_t &>(primary_fb));
10592        }
10593        auto fb = getFramebuffer(dev_data, secondary_fb);
10594        if (!fb) {
10595            skip_call |=
10596                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10597                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
10598                                                                          "which has invalid framebuffer 0x%" PRIx64 ".",
10599                        (void *)secondaryBuffer, (uint64_t)(secondary_fb));
10600            return skip_call;
10601        }
10602        auto cb_renderpass = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
        if (cb_renderpass && cb_renderpass->renderPass != fb->createInfo.renderPass) {
10604            skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
10605                                                         cb_renderpass->createInfo.ptr());
10606        }
10607    }
10608    return skip_call;
10609}
10610
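// Cross-check query state between the two command buffers (pipeline-statistics coverage and
// no query type started in both) and require that both command pools were created for the
// same queue family.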
10611static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
10612    bool skip_call = false;
10613    unordered_set<int> activeTypes;
10614    for (auto queryObject : pCB->activeQueries) {
10615        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10616        if (queryPoolData != dev_data->queryPoolMap.end()) {
10617            if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
10618                pSubCB->beginInfo.pInheritanceInfo) {
10619                VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
10620                if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
10621                    skip_call |= log_msg(
10622                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10623                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10624                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 ". Pipeline statistics are being queried, so the query "
                        "pool must have been created with every statistics bit the secondary command buffer inherits.",
10627                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first));
10628                }
10629            }
10630            activeTypes.insert(queryPoolData->second.createInfo.queryType);
10631        }
10632    }
10633    for (auto queryObject : pSubCB->startedQueries) {
10634        auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
10635        if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
10636            skip_call |=
10637                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10638                        DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10639                        "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
                        "which has invalid active query pool 0x%" PRIx64 " of type %d but a query of that type has been started on "
10641                        "secondary Cmd Buffer 0x%p.",
10642                        reinterpret_cast<void *>(pCB->commandBuffer), reinterpret_cast<const uint64_t &>(queryPoolData->first),
10643                        queryPoolData->second.createInfo.queryType, reinterpret_cast<void *>(pSubCB->commandBuffer));
10644        }
10645    }
10646
10647    auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
10648    auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
10649    if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
10650        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10651                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
10652                             "vkCmdExecuteCommands(): Primary command buffer 0x%" PRIxLEAST64
10653                             " created in queue family %d has secondary command buffer 0x%" PRIxLEAST64 " created in queue family %d.",
10654                             reinterpret_cast<uint64_t>(pCB->commandBuffer), primary_pool->queueFamilyIndex,
10655                             reinterpret_cast<uint64_t>(pSubCB->commandBuffer), secondary_pool->queueFamilyIndex);
10656    }
10657
10658    return skip_call;
10659}
10660
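// A minimal sketch (illustrative only) of the vkBeginCommandBuffer call the checks below
// expect for a secondary buffer executed inside a render pass; render_pass, framebuffer,
// and secondary_cb are hypothetical handles.
//
//     VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//     inherit.renderPass = render_pass;    // must be compatible with the primary's active pass
//     inherit.framebuffer = framebuffer;   // VK_NULL_HANDLE or the primary's active framebuffer
//     VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//     begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin.pInheritanceInfo = &inherit;
//     vkBeginCommandBuffer(secondary_cb, &begin);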
10661VKAPI_ATTR void VKAPI_CALL
10662CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
10663    bool skip_call = false;
10664    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
10665    std::unique_lock<std::mutex> lock(global_lock);
10666    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
10667    if (pCB) {
10668        GLOBAL_CB_NODE *pSubCB = NULL;
10669        for (uint32_t i = 0; i < commandBuffersCount; i++) {
10670            pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
10671            if (!pSubCB) {
10672                skip_call |=
10673                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
10674                            DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10675                            "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p in element %u of pCommandBuffers array.",
10676                            (void *)pCommandBuffers[i], i);
10677            } else if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
10678                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10679                                     __LINE__, DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
10680                                     "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
10681                                     "array. All cmd buffers in pCommandBuffers array must be secondary.",
10682                                     (void *)pCommandBuffers[i], i);
10683            } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
10684                auto secondary_rp_node = getRenderPass(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
10685                if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
10686                    skip_call |= log_msg(
10687                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10688                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_BEGIN_CB_INVALID_STATE, "DS",
10689                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
10690                        ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set.",
10691                        (void *)pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass);
10692                } else {
                    // Make sure the render pass is compatible with the parent command buffer's pass when the continue bit is set
10694                    if (pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) {
10695                        skip_call |=
10696                            validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
10697                                                            pCommandBuffers[i], secondary_rp_node->createInfo.ptr());
10698                    }
10699                    //  If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
10700                    skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
10701                }
10702                string errorString = "";
10703                // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
10704                if ((pCB->activeRenderPass->renderPass != secondary_rp_node->renderPass) &&
10705                    !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
10706                                                     secondary_rp_node->createInfo.ptr(), errorString)) {
10707                    skip_call |= log_msg(
10708                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10709                        (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
10710                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
10711                        ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
10712                        (void *)pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, (void *)commandBuffer,
10713                        (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
10714                }
10715            }
10716            // TODO(mlentine): Move more logic into this method
10717            skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
10718            skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
            // Secondary cmdBuffers are considered pending execution starting when they
            // are recorded into a primary
10721            if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
10722                if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
10723                    skip_call |= log_msg(
10724                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10725                        (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10726                        "Attempt to simultaneously execute CB 0x%" PRIxLEAST64 " w/o VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10727                        "set!",
10728                        (uint64_t)(pCB->commandBuffer));
10729                }
10730                if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
10731                    // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
10732                    skip_call |= log_msg(
10733                        dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10734                        (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
10735                        "vkCmdExecuteCommands(): Secondary Command Buffer (0x%" PRIxLEAST64
10736                        ") does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
10737                        "(0x%" PRIxLEAST64 ") to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
10738                        "set, even though it does.",
10739                        (uint64_t)(pCommandBuffers[i]), (uint64_t)(pCB->commandBuffer));
10740                    pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
10741                }
10742            }
10743            if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
10744                skip_call |=
10745                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10746                            reinterpret_cast<uint64_t>(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
10747                            "vkCmdExecuteCommands(): Secondary Command Buffer "
10748                            "(0x%" PRIxLEAST64 ") cannot be submitted with a query in "
10749                            "flight and inherited queries not "
10750                            "supported on this device.",
10751                            reinterpret_cast<uint64_t>(pCommandBuffers[i]));
10752            }
10753            pSubCB->primaryCommandBuffer = pCB->commandBuffer;
10754            pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
10755            dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
10756            for (auto &function : pSubCB->queryUpdates) {
10757                pCB->queryUpdates.push_back(function);
10758            }
10759        }
        skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands");
        skip_call |= addCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
10762    }
10763    lock.unlock();
10764    if (!skip_call)
10765        dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
10766}
10767
10768// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
10769static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
10770                                    VkDeviceSize end_offset) {
10771    bool skip_call = false;
10772    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10773    // Iterate over all bound image ranges and verify that for any that overlap the
10774    //  map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
10775    // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
10776    for (auto image_handle : mem_info->bound_images) {
10777        auto img_it = mem_info->bound_ranges.find(image_handle);
10778        if (img_it != mem_info->bound_ranges.end()) {
10779            if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
10780                std::vector<VkImageLayout> layouts;
10781                if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
10782                    for (auto layout : layouts) {
10783                        if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
10784                            skip_call |=
10785                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
10786                                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
10787                                                                                        "GENERAL or PREINITIALIZED are supported.",
10788                                        string_VkImageLayout(layout));
10789                        }
10790                    }
10791                }
10792            }
10793        }
10794    }
10795    return skip_call;
10796}
10797
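// vkMapMemory is only legal on HOST_VISIBLE memory; once mapped, the layer conservatively
// treats the whole allocation as valid (see global_valid below) since the app can write it.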
10798VKAPI_ATTR VkResult VKAPI_CALL
10799MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
10800    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10801
10802    bool skip_call = false;
10803    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10804    std::unique_lock<std::mutex> lock(global_lock);
10805    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
10806    if (mem_info) {
        // TODO : This could be more fine-grained to track just the region that is valid
10808        mem_info->global_valid = true;
10809        auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
10810        skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
10811        // TODO : Do we need to create new "bound_range" for the mapped range?
10812        SetMemRangesValid(dev_data, mem_info, offset, end_offset);
10813        if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
10814             VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
            skip_call |=
10816                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10817                        (uint64_t)mem, __LINE__, MEMTRACK_INVALID_STATE, "MEM",
10818                        "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64, (uint64_t)mem);
10819        }
10820    }
10821    skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
10822    lock.unlock();
10823
10824    if (!skip_call) {
10825        result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
10826        if (VK_SUCCESS == result) {
10827            lock.lock();
10828            // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
10829            storeMemRanges(dev_data, mem, offset, size);
10830            initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
10831            lock.unlock();
10832        }
10833    }
10834    return result;
10835}
10836
10837VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
10838    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10839    bool skip_call = false;
10840
10841    std::unique_lock<std::mutex> lock(global_lock);
10842    skip_call |= deleteMemRanges(dev_data, mem);
10843    lock.unlock();
10844    if (!skip_call) {
10845        dev_data->dispatch_table.UnmapMemory(device, mem);
10846    }
10847}
10848
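// For vkFlushMappedMemoryRanges/vkInvalidateMappedMemoryRanges: each range must lie within
// the currently mapped region of its memory object.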
10849static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
10850                                   const VkMappedMemoryRange *pMemRanges) {
10851    bool skip_call = false;
10852    for (uint32_t i = 0; i < memRangeCount; ++i) {
10853        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10854        if (mem_info) {
10855            if (mem_info->mem_range.offset > pMemRanges[i].offset) {
10856                skip_call |=
10857                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10858                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than the Memory Object's offset "
10860                            "(" PRINTF_SIZE_T_SPECIFIER ").",
10861                            funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mem_range.offset));
10862            }
10863
10864            const uint64_t mem_range_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
10865                                               ? mem_info->alloc_info.allocationSize
10866                                               : (mem_info->mem_range.offset + mem_info->mem_range.size);
10867            if (pMemRanges[i].size != VK_WHOLE_SIZE && (mem_range_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
10868                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
10869                                     VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)pMemRanges[i].memory, __LINE__,
10870                                     MEMTRACK_INVALID_MAP, "MEM", "%s: Flush/Invalidate upper-bound (" PRINTF_SIZE_T_SPECIFIER
10871                                                                  ") exceeds the Memory Object's upper-bound "
10872                                                                  "(" PRINTF_SIZE_T_SPECIFIER ").",
10873                                     funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
10874                                     static_cast<size_t>(mem_range_end));
10875            }
10876        }
10877    }
10878    return skip_call;
10879}
10880
10881static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t memRangeCount,
10882                                                     const VkMappedMemoryRange *pMemRanges) {
10883    bool skip_call = false;
10884    for (uint32_t i = 0; i < memRangeCount; ++i) {
10885        auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
10886        if (mem_info) {
10887            if (mem_info->shadow_copy) {
10888                VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10889                                        ? mem_info->mem_range.size
10890                                        : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
10891                char *data = static_cast<char *>(mem_info->shadow_copy);
10892                for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
10893                    if (data[j] != NoncoherentMemoryFillValue) {
10894                        skip_call |= log_msg(
10895                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10896                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10897                            "Memory underflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10898                    }
10899                }
10900                for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
10901                    if (data[j] != NoncoherentMemoryFillValue) {
10902                        skip_call |= log_msg(
10903                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
10904                            (uint64_t)pMemRanges[i].memory, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
10905                            "Memory overflow was detected on mem obj 0x%" PRIxLEAST64, (uint64_t)pMemRanges[i].memory);
10906                    }
10907                }
10908                memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
10909            }
10910        }
10911    }
10912    return skip_call;
10913}
10914
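// Layout of the shadow allocation used for non-coherent memory, as checked above and
// refreshed below (NoncoherentMemoryFillValue is the guard byte):
//
//     [ shadow_pad_size ][ user data ('size' bytes) ][ shadow_pad_size ]
//       underflow guard    handed back to the app      overflow guard
//
// Any guard byte that no longer equals NoncoherentMemoryFillValue means the app wrote
// outside its mapped range; only the middle region is copied to/from p_driver_data.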
10915static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t memory_range_count,
10916                                            const VkMappedMemoryRange *mem_ranges) {
10917    for (uint32_t i = 0; i < memory_range_count; ++i) {
10918        auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
10919        if (mem_info && mem_info->shadow_copy) {
10920            VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
10921                                    ? mem_info->mem_range.size
10922                                    : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
10923            char *data = static_cast<char *>(mem_info->shadow_copy);
10924            memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
10925        }
10926    }
10927}
10928
10929 VKAPI_ATTR VkResult VKAPI_CALL
10930FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10931    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10932    bool skip_call = false;
10933    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10934
10935    std::unique_lock<std::mutex> lock(global_lock);
10936    skip_call |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, memRangeCount, pMemRanges);
10937    skip_call |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
10938    lock.unlock();
10939    if (!skip_call) {
10940        result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
10941    }
10942    return result;
10943}
10944
10945 VKAPI_ATTR VkResult VKAPI_CALL
10946InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
10947    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10948    bool skip_call = false;
10949    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10950
10951    std::unique_lock<std::mutex> lock(global_lock);
10952    skip_call |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
10953    lock.unlock();
10954    if (!skip_call) {
10955        result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
10956        // Update our shadow copy with modified driver data
10957        CopyNoncoherentMemoryFromDriver(dev_data, memRangeCount, pMemRanges);
10958    }
10959    return result;
10960}
10961
10962VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
10963    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
10964    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
10965    bool skip_call = false;
10966    std::unique_lock<std::mutex> lock(global_lock);
10967    auto image_node = getImageNode(dev_data, image);
10968    if (image_node) {
10969        // Track objects tied to memory
10970        uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
10971        skip_call |= SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
10972        VkMemoryRequirements memRequirements;
10973        lock.unlock();
10974        dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &memRequirements);
10975        lock.lock();
10976
10977        // Track and validate bound memory range information
10978        auto mem_info = getMemObjInfo(dev_data, mem);
10979        if (mem_info) {
10980            skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, memRequirements,
10981                                                image_node->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
10982            skip_call |= ValidateMemoryTypes(dev_data, mem_info, memRequirements.memoryTypeBits, "vkBindImageMemory");
10983        }
10984
10985        print_mem_list(dev_data);
10986        lock.unlock();
10987        if (!skip_call) {
10988            result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
10989            lock.lock();
10990            image_node->mem = mem;
10991            image_node->memOffset = memoryOffset;
10992            image_node->memSize = memRequirements.size;
10993            lock.unlock();
10994        }
10995    } else {
10996        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
10997                reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
10998                "vkBindImageMemory: Cannot find image 0x%" PRIx64 ". Has it already been deleted?",
10999                reinterpret_cast<const uint64_t &>(image));
11000    }
11001    return result;
11002}
11003
11004VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
11005    bool skip_call = false;
11006    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11007    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11008    std::unique_lock<std::mutex> lock(global_lock);
11009    auto event_node = getEventNode(dev_data, event);
11010    if (event_node) {
11011        event_node->needsSignaled = false;
11012        event_node->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11013        if (event_node->write_in_use) {
11014            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11015                                 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11016                                 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
11017                                 reinterpret_cast<const uint64_t &>(event));
11018        }
11019    }
11020    lock.unlock();
11021    // Host setting event is visible to all queues immediately so update stageMask for any queue that's seen this event
11022    // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
11023    // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
11024    for (auto queue_data : dev_data->queueMap) {
11025        auto event_entry = queue_data.second.eventToStageMap.find(event);
11026        if (event_entry != queue_data.second.eventToStageMap.end()) {
11027            event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11028        }
11029    }
11030    if (!skip_call)
11031        result = dev_data->dispatch_table.SetEvent(device, event);
11032    return result;
11033}
11034
11035VKAPI_ATTR VkResult VKAPI_CALL
11036QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
11037    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11038    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
11039    bool skip_call = false;
11040    std::unique_lock<std::mutex> lock(global_lock);
11041    auto pFence = getFenceNode(dev_data, fence);
11042    auto pQueue = getQueueNode(dev_data, queue);
11043
11044    // First verify that fence is not in use
11045    skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11046
11047    if (pFence) {
11048        SubmitFence(pQueue, pFence, bindInfoCount);
11049    }
11050
11051    for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11052        const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
11053        // Track objects tied to memory
11054        for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11055            for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
11056                if (set_sparse_mem_binding(dev_data, bindInfo.pBufferBinds[j].pBinds[k].memory,
11057                                           (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11058                                           "vkQueueBindSparse"))
11059                    skip_call = true;
11060            }
11061        }
11062        for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11063            for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
11064                if (set_sparse_mem_binding(dev_data, bindInfo.pImageOpaqueBinds[j].pBinds[k].memory,
11065                                           (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11066                                           "vkQueueBindSparse"))
11067                    skip_call = true;
11068            }
11069        }
11070        for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11071            for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
11072                if (set_sparse_mem_binding(dev_data, bindInfo.pImageBinds[j].pBinds[k].memory,
11073                                           (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11074                                           "vkQueueBindSparse"))
11075                    skip_call = true;
11076            }
11077        }
11078
11079        std::vector<SEMAPHORE_WAIT> semaphore_waits;
11080        std::vector<VkSemaphore> semaphore_signals;
11081        for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
11082            VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11083            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11084            if (pSemaphore) {
11085                if (pSemaphore->signaled) {
11086                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11087                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11088                        pSemaphore->in_use.fetch_add(1);
11089                    }
11090                    pSemaphore->signaler.first = VK_NULL_HANDLE;
11091                    pSemaphore->signaled = false;
11092                } else {
11093                    skip_call |=
11094                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11095                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11096                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64
11097                                " that has no way to be signaled.",
11098                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11099                }
11100            }
11101        }
11102        for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
11103            VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11104            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11105            if (pSemaphore) {
11106                if (pSemaphore->signaled) {
11107                    skip_call |=
11108                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11109                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11110                                "vkQueueBindSparse: Queue 0x%" PRIx64 " is signaling semaphore 0x%" PRIx64
11111                                ", but that semaphore is already signaled.",
11112                                reinterpret_cast<const uint64_t &>(queue), reinterpret_cast<const uint64_t &>(semaphore));
11113                }
11114                else {
11115                    pSemaphore->signaler.first = queue;
11116                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11117                    pSemaphore->signaled = true;
11118                    pSemaphore->in_use.fetch_add(1);
11119                    semaphore_signals.push_back(semaphore);
11120                }
11121            }
11122        }
11123
11124        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11125                                         semaphore_waits,
11126                                         semaphore_signals,
11127                                         bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
11128    }
11129
11130    if (pFence && !bindInfoCount) {
11131        // No work to do, just dropping a fence in the queue by itself.
11132        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11133                                         std::vector<SEMAPHORE_WAIT>(),
11134                                         std::vector<VkSemaphore>(),
11135                                         fence);
11136    }
11137
11138    print_mem_list(dev_data);
11139    lock.unlock();
11140
11141    if (!skip_call)
11142        return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
11143
11144    return result;
11145}
11146
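// The semaphore bookkeeping above mirrors vkQueueSubmit: a wait consumes the pending signal
// and records the (queue, seq) pair of the signaler so its retirement can be proven later,
// while a signal stamps the semaphore with this queue and the sequence number its bind
// payload will occupy. Sparse binds therefore chain into the same completion-proof
// machinery as ordinary command buffer submissions.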
11147VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11148                                               const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
11149    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11150    VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
11151    if (result == VK_SUCCESS) {
11152        std::lock_guard<std::mutex> lock(global_lock);
11153        SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
11154        sNode->signaler.first = VK_NULL_HANDLE;
11155        sNode->signaler.second = 0;
11156        sNode->signaled = false;
11157    }
11158    return result;
11159}
11160
11161VKAPI_ATTR VkResult VKAPI_CALL
11162CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
11163    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11164    VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
11165    if (result == VK_SUCCESS) {
11166        std::lock_guard<std::mutex> lock(global_lock);
11167        dev_data->eventMap[*pEvent].needsSignaled = false;
11168        dev_data->eventMap[*pEvent].write_in_use = 0;
11169        dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
11170    }
11171    return result;
11172}
11173
11174static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, VkSwapchainCreateInfoKHR const *pCreateInfo,
11175                                              SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) {
11176    auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11177
11178    if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11179        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11180                    reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
11181                    "vkCreateSwapchainKHR(): surface has an existing swapchain other than oldSwapchain"))
11182            return true;
11183    }
11184    if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11185        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11186                    reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
11187                    "DS", "vkCreateSwapchainKHR(): pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface"))
11188            return true;
11189    }
11190
11191    return false;
11192}
11193
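// Typical swapchain recreation that satisfies the checks above (illustrative only; error
// handling elided, and 'base_create_info'/'old_swapchain' are hypothetical app variables):
//
//     VkSwapchainCreateInfoKHR ci = base_create_info;
//     ci.surface = surface;             // must match old_swapchain's surface
//     ci.oldSwapchain = old_swapchain;  // the surface's current swapchain
//     VkSwapchainKHR new_swapchain = VK_NULL_HANDLE;
//     vkCreateSwapchainKHR(device, &ci, nullptr, &new_swapchain);
//     vkDestroySwapchainKHR(device, old_swapchain, nullptr);  // retired even on failure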
11194VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
11195                                                  const VkAllocationCallbacks *pAllocator,
11196                                                  VkSwapchainKHR *pSwapchain) {
11197    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11198    auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
11199    auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
11200
11201    if (PreCallValidateCreateSwapchainKHR(dev_data, pCreateInfo, surface_state, old_swapchain_state))
11202        return VK_ERROR_VALIDATION_FAILED_EXT;
11203
11204    VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
11205
11206    if (VK_SUCCESS == result) {
11207        std::lock_guard<std::mutex> lock(global_lock);
11208        auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
11209        surface_state->swapchain = swapchain_state.get();
11210        dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
11211    } else {
11212        surface_state->swapchain = nullptr;
11213    }
11214
11215    // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
11216    surface_state->old_swapchain = old_swapchain_state;
11217
11218    return result;
11219}
11220
11221VKAPI_ATTR void VKAPI_CALL
11222DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
11223    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11224    bool skip_call = false;
11225
11226    std::unique_lock<std::mutex> lock(global_lock);
11227    auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11228    if (swapchain_data) {
11229        if (swapchain_data->images.size() > 0) {
11230            for (auto swapchain_image : swapchain_data->images) {
11231                auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
11232                if (image_sub != dev_data->imageSubresourceMap.end()) {
11233                    for (auto imgsubpair : image_sub->second) {
11234                        auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
11235                        if (image_item != dev_data->imageLayoutMap.end()) {
11236                            dev_data->imageLayoutMap.erase(image_item);
11237                        }
11238                    }
11239                    dev_data->imageSubresourceMap.erase(image_sub);
11240                }
11241                skip_call |=
11242                    clear_object_binding(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
11243                dev_data->imageMap.erase(swapchain_image);
11244            }
11245        }
11246
11247        auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
11248        if (surface_state) {
11249            if (surface_state->swapchain == swapchain_data)
11250                surface_state->swapchain = nullptr;
11251            if (surface_state->old_swapchain == swapchain_data)
11252                surface_state->old_swapchain = nullptr;
11253        }
11254
11255        dev_data->device_extensions.swapchainMap.erase(swapchain);
11256    }
11257    lock.unlock();
11258    if (!skip_call)
11259        dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
11260}
11261
11262VKAPI_ATTR VkResult VKAPI_CALL
11263GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
11264    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11265    VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
11266
11267    if (result == VK_SUCCESS && pSwapchainImages != NULL) {
11268        // This should never happen and is checked by param checker.
11269        if (!pCount)
11270            return result;
11271        std::lock_guard<std::mutex> lock(global_lock);
11272        const size_t count = *pCount;
11273        auto swapchain_node = getSwapchainNode(dev_data, swapchain);
11274        if (swapchain_node && !swapchain_node->images.empty()) {
11275            // TODO : Not sure I like the memcmp here, but it works
11276            const bool mismatch = (swapchain_node->images.size() != count ||
11277                                   memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
11278            if (mismatch) {
11279                // TODO: Verify against Valid Usage section of extension
11280                log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11281                        (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
11282                        "vkGetSwapchainImagesKHR(0x%" PRIx64
11283                        ") returned mismatching image data on a subsequent call",
11284                        (uint64_t)(swapchain));
11285            }
11286        }
11287        for (uint32_t i = 0; swapchain_node && (i < *pCount); ++i) {
11288            IMAGE_LAYOUT_NODE image_layout_node;
11289            image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
11290            image_layout_node.format = swapchain_node->createInfo.imageFormat;
11291            // Add imageMap entries for each swapchain image
11292            VkImageCreateInfo image_ci = {};
11293            image_ci.mipLevels = 1;
11294            image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
11295            image_ci.usage = swapchain_node->createInfo.imageUsage;
11296            image_ci.format = swapchain_node->createInfo.imageFormat;
11297            image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
11298            image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
11299            image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
11300            image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
11301            dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_NODE>(new IMAGE_NODE(pSwapchainImages[i], &image_ci));
11302            auto &image_node = dev_data->imageMap[pSwapchainImages[i]];
11303            image_node->valid = false;
11304            image_node->mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
11305            swapchain_node->images.push_back(pSwapchainImages[i]);
11306            ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
11307            dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
11308            dev_data->imageLayoutMap[subpair] = image_layout_node;
11309            dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
11310        }
11311    }
11312    return result;
11313}
11314
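// The proxy IMAGE_NODEs created above let swapchain images participate in the same layout
// and memory-validity tracking as app-created images even though they never pass through
// vkCreateImage: 'valid' starts false (contents are undefined until written), the tracked
// layout starts at VK_IMAGE_LAYOUT_UNDEFINED, and imageToSwapchainMap ties each image back
// to the swapchain that owns it.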
11315VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
11316    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11317    bool skip_call = false;
11318
11319    std::lock_guard<std::mutex> lock(global_lock);
11320    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11321        auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11322        if (pSemaphore && !pSemaphore->signaled) {
11323            skip_call |=
11324                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11325                            VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11326                            "Queue 0x%" PRIx64 " is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11327                            reinterpret_cast<uint64_t &>(queue), reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
11328        }
11329    }
11330
11331    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11332        auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11333        if (swapchain_data) {
11334            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
11335                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11336                                     reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
11337                                     "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
11338                                     pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
11339            }
11340            else {
11341                auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11342                auto image_node = getImageNode(dev_data, image);
11343                skip_call |= ValidateImageMemoryIsValid(dev_data, image_node, "vkQueuePresentKHR()");
11344
11345                if (image_node && !image_node->acquired) {
11346                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11347                                         reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
11348                                         "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
11349                                         pPresentInfo->pImageIndices[i]);
11350                }
11351
11352                vector<VkImageLayout> layouts;
11353                if (FindLayouts(dev_data, image, layouts)) {
11354                    for (auto layout : layouts) {
11355                        if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
11356                            skip_call |=
11357                                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
11358                                            reinterpret_cast<uint64_t &>(queue), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
11359                                            "Images passed to present must be in layout "
11360                                            "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s",
11361                                            string_VkImageLayout(layout));
11362                        }
11363                    }
11364                }
11365            }
11366        }
11367    }
11368
11369    if (skip_call) {
11370        return VK_ERROR_VALIDATION_FAILED_EXT;
11371    }
11372
11373    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
11374
11375    if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
11376        // Semaphore waits occur before error generation, if the call reached
11377        // the ICD. (Confirm?)
11378        for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
11379            auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
11380            if (pSemaphore) {
11381                pSemaphore->signaler.first = VK_NULL_HANDLE;
11382                pSemaphore->signaled = false;
11383            }
11384        }
11385
11386        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
11387            // Note: this is imperfect, in that we can get confused about what
11388            // did or didn't succeed, but if the app does that, it's just as
11389            // confused itself.
11390            auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
11391
11392            if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
11393                continue; // this present didn't actually happen.
11394
11395            // Mark the image as having been released to the WSI
11396            auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
11397            auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
11398            auto image_node = getImageNode(dev_data, image);
11399            image_node->acquired = false;
11400        }
11401
11402        // Note: even though presentation is directed to a queue, there is no
11403        // direct ordering between QP and subsequent work, so QP (and its
11404        // semaphore waits) /never/ participate in any completion proof.
11405    }
11406
11407    return result;
11408}
11409
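// The layout check above expects each image to reach VK_IMAGE_LAYOUT_PRESENT_SRC_KHR before
// it is presented, e.g. via a barrier at the end of the frame's command buffer (illustrative
// sketch; access masks abbreviated and 'swapchain_images'/'image_index' are hypothetical):
//
//     VkImageMemoryBarrier barrier = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER};
//     barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
//     barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
//     barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//     barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
//     barrier.image = swapchain_images[image_index];
//     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//     vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
//                          VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr,
//                          1, &barrier);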
11410VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
11411                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
11412                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
11413    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11414    std::unique_lock<std::mutex> lock(global_lock);
11415    VkResult result =
11416        dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
11417    return result;
11418}
11419
11420VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
11421                                                   VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
11422    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11423    bool skip_call = false;
11424
11425    std::unique_lock<std::mutex> lock(global_lock);
11426
11427    if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
11428        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11429                             reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
11430                             "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
11431                             "to determine the completion of this operation.");
11432    }
11433
11434    auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11435    if (pSemaphore && pSemaphore->signaled) {
11436        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11437                             reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11438                             "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state");
11439    }
11440
11441    auto pFence = getFenceNode(dev_data, fence);
11442    if (pFence) {
11443        skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11444    }
11445    lock.unlock();
11446
11447    if (skip_call)
11448        return VK_ERROR_VALIDATION_FAILED_EXT;
11449
11450    VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
11451
11452    lock.lock();
11453    if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
11454        if (pFence) {
11455            pFence->state = FENCE_INFLIGHT;
11456            pFence->signaler.first = VK_NULL_HANDLE;   // ANI isn't on a queue, so this can't participate in a completion proof.
11457        }
11458
11459        // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
11460        if (pSemaphore) {
11461            pSemaphore->signaled = true;
11462            pSemaphore->signaler.first = VK_NULL_HANDLE;
11463        }
11464
11465        // Mark the image as acquired.
11466        auto swapchain_data = getSwapchainNode(dev_data, swapchain);
11467        auto image = swapchain_data->images[*pImageIndex];
11468        auto image_node = getImageNode(dev_data, image);
11469        image_node->acquired = true;
11470    }
11471    lock.unlock();
11472
11473    return result;
11474}
11475
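// Correct acquisition, per the checks above, always supplies at least one sync primitive
// (illustrative only; 'acquire_semaphore' is a hypothetical, currently-unsignaled semaphore):
//
//     uint32_t image_index = 0;
//     vkAcquireNextImageKHR(device, swapchain, UINT64_MAX, acquire_semaphore,
//                           VK_NULL_HANDLE, &image_index);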
11476VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
11477                                                        VkPhysicalDevice *pPhysicalDevices) {
11478    bool skip_call = false;
11479    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11480    if (instance_data->instance_state) {
11481        // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
11482        if (NULL == pPhysicalDevices) {
11483            instance_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
11484        } else {
11485            if (UNCALLED == instance_data->instance_state->vkEnumeratePhysicalDevicesState) {
11486                // Flag warning here. You can call this without having queried the count, but it may not be
11487                // robust on platforms with multiple physical devices.
11488                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
11489                                    0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11490                                    "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
11491                                    "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
11492            } // TODO : Could also flag a warning if re-calling this function in QUERY_DETAILS state
11493            else if (instance_data->instance_state->physical_devices_count != *pPhysicalDeviceCount) {
11494                // Having actual count match count from app is not a requirement, so this can be a warning
11495                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11496                                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11497                                    "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
11498                                    "supported by this instance is %u.",
11499                                    *pPhysicalDeviceCount, instance_data->instance_state->physical_devices_count);
11500            }
11501            instance_data->instance_state->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
11502        }
11503        if (skip_call) {
11504            return VK_ERROR_VALIDATION_FAILED_EXT;
11505        }
11506        VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
11507        if (NULL == pPhysicalDevices) {
11508            instance_data->instance_state->physical_devices_count = *pPhysicalDeviceCount;
11509        } else if (result == VK_SUCCESS) { // Save physical devices
11510            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
11511                auto & phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
11512                phys_device_state.phys_device = pPhysicalDevices[i];
11513                // Init actual features for each physical device
11514                instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
11515            }
11516        }
11517        return result;
11518    } else {
11519        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__,
11520                DEVLIMITS_INVALID_INSTANCE, "DL", "Invalid instance (0x%" PRIxLEAST64 ") passed into vkEnumeratePhysicalDevices().",
11521                (uint64_t)instance);
11522    }
11523    return VK_ERROR_VALIDATION_FAILED_EXT;
11524}
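// The expected two-call idiom that drives the QUERY_COUNT -> QUERY_DETAILS state machines
// above and below (illustrative only):
//
//     uint32_t count = 0;
//     vkEnumeratePhysicalDevices(instance, &count, nullptr);          // QUERY_COUNT
//     std::vector<VkPhysicalDevice> devices(count);
//     vkEnumeratePhysicalDevices(instance, &count, devices.data());   // QUERY_DETAILS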
11525
11526VKAPI_ATTR void VKAPI_CALL
11527GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
11528    VkQueueFamilyProperties *pQueueFamilyProperties) {
11529    bool skip_call = false;
11530    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11531    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
11532    if (physical_device_state) {
11533        if (!pQueueFamilyProperties) {
11534            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
11535        }
11536        else {
11537            // Verify that for each physical device, this function is called first with NULL pQueueFamilyProperties ptr in order to
11538            // get count
11539            if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
11540                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11541                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
11542                    "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
11543                    "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
11544                    "NULL pQueueFamilyProperties to query pCount.");
11545            }
11546            // Then verify that pCount that is passed in on second call matches what was returned
11547            if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
11548
11549                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties, so
11550                // provide as warning
11551                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
11552                    VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
11553                    "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
11554                    "supported by this physicalDevice is %u.",
11555                    *pCount, physical_device_state->queueFamilyPropertiesCount);
11556            }
11557            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
11558        }
11559        if (skip_call) {
11560            return;
11561        }
11562        instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
11563        if (!pQueueFamilyProperties) {
11564            physical_device_state->queueFamilyPropertiesCount = *pCount;
11565        }
11566        else { // Save queue family properties
11567            if (physical_device_state->queue_family_properties.size() < *pCount)
11568                physical_device_state->queue_family_properties.resize(*pCount);
11569            for (uint32_t i = 0; i < *pCount; i++) {
11570                physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
11571            }
11572        }
11573    }
11574    else {
11575        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
11576            __LINE__, DEVLIMITS_INVALID_PHYSICAL_DEVICE, "DL",
11577            "Invalid physicalDevice (0x%" PRIxLEAST64 ") passed into vkGetPhysicalDeviceQueueFamilyProperties().",
11578            (uint64_t)physicalDevice);
11579    }
11580}
11581
11582template<typename TCreateInfo, typename FPtr>
11583static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo,
11584                              VkAllocationCallbacks const *pAllocator, VkSurfaceKHR *pSurface,
11585                              FPtr fptr)
11586{
11587    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11588
11589    // Call down the call chain:
11590    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);
11591
11592    if (result == VK_SUCCESS) {
11593        std::unique_lock<std::mutex> lock(global_lock);
11594        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
11595        lock.unlock();
11596    }
11597
11598    return result;
11599}
11600
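// CreateSurface deduplicates the per-platform vkCreate*SurfaceKHR wrappers further below:
// each passes a pointer to the matching VkLayerInstanceDispatchTable member, and
// '(instance_data->dispatch_table.*fptr)(...)' forwards the call down the chain while the
// SURFACE_STATE bookkeeping stays in one place.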
11601VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
11602    bool skip_call = false;
11603    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11604    std::unique_lock<std::mutex> lock(global_lock);
11605    auto surface_state = getSurfaceState(instance_data, surface);
11606
11607    if (surface_state) {
11608        // TODO: track swapchains created from this surface.
11609        instance_data->surface_map.erase(surface);
11610    }
11611    lock.unlock();
11612
11613    if (!skip_call) {
11614        // Call down the call chain:
11615        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
11616    }
11617}
11618
11619#ifdef VK_USE_PLATFORM_ANDROID_KHR
11620VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
11621                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11622    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
11623}
11624#endif // VK_USE_PLATFORM_ANDROID_KHR
11625
11626#ifdef VK_USE_PLATFORM_MIR_KHR
11627VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
11628                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11629    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
11630}
11631#endif // VK_USE_PLATFORM_MIR_KHR
11632
11633#ifdef VK_USE_PLATFORM_WAYLAND_KHR
11634VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
11635                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11636    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
11637}
11638#endif // VK_USE_PLATFORM_WAYLAND_KHR
11639
11640#ifdef VK_USE_PLATFORM_WIN32_KHR
11641VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
11642                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11643    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
11644}
11645#endif // VK_USE_PLATFORM_WIN32_KHR
11646
11647#ifdef VK_USE_PLATFORM_XCB_KHR
11648VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
11649                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11650    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
11651}
11652#endif // VK_USE_PLATFORM_XCB_KHR
11653
11654#ifdef VK_USE_PLATFORM_XLIB_KHR
11655VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
11656                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
11657    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
11658}
11659#endif // VK_USE_PLATFORM_XLIB_KHR
11660
11661
11662VKAPI_ATTR VkResult VKAPI_CALL
11663CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
11664                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
11665    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11666    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
11667    if (VK_SUCCESS == res) {
11668        std::lock_guard<std::mutex> lock(global_lock);
11669        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
11670    }
11671    return res;
11672}
11673
11674VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance,
11675                                                         VkDebugReportCallbackEXT msgCallback,
11676                                                         const VkAllocationCallbacks *pAllocator) {
11677    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11678    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
11679    std::lock_guard<std::mutex> lock(global_lock);
11680    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
11681}
11682
11683VKAPI_ATTR void VKAPI_CALL
11684DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
11685                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
11686    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11687    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
11688}
11689
11690VKAPI_ATTR VkResult VKAPI_CALL
11691EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
11692    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11693}
11694
11695VKAPI_ATTR VkResult VKAPI_CALL
11696EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
11697    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
11698}
11699
11700VKAPI_ATTR VkResult VKAPI_CALL
11701EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
11702    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11703        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);
11704
11705    return VK_ERROR_LAYER_NOT_PRESENT;
11706}
11707
11708VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
11709                                                                  const char *pLayerName, uint32_t *pCount,
11710                                                                  VkExtensionProperties *pProperties) {
11711    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
11712        return util_GetExtensionProperties(0, NULL, pCount, pProperties);
11713
11714    assert(physicalDevice);
11715
11716    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
11717    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
11718}
11719
11720static PFN_vkVoidFunction
11721intercept_core_instance_command(const char *name);
11722
11723static PFN_vkVoidFunction
11724intercept_core_device_command(const char *name);
11725
11726static PFN_vkVoidFunction
11727intercept_khr_swapchain_command(const char *name, VkDevice dev);
11728
11729static PFN_vkVoidFunction
11730intercept_khr_surface_command(const char *name, VkInstance instance);
11731
11732VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
11733    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
11734    if (proc)
11735        return proc;
11736
11737    assert(dev);
11738
11739    proc = intercept_khr_swapchain_command(funcName, dev);
11740    if (proc)
11741        return proc;
11742
11743    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
11744
11745    auto &table = dev_data->dispatch_table;
11746    if (!table.GetDeviceProcAddr)
11747        return nullptr;
11748    return table.GetDeviceProcAddr(dev, funcName);
11749}
11750
11751VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
11752    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
11753    if (!proc)
11754        proc = intercept_core_device_command(funcName);
11755    if (!proc)
11756        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
11757    if (!proc)
11758        proc = intercept_khr_surface_command(funcName, instance);
11759    if (proc)
11760        return proc;
11761
11762    assert(instance);
11763
11764    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
11765    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
11766    if (proc)
11767        return proc;
11768
11769    auto &table = instance_data->dispatch_table;
11770    if (!table.GetInstanceProcAddr)
11771        return nullptr;
11772    return table.GetInstanceProcAddr(instance, funcName);
11773}
11774
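// GetInstanceProcAddr/GetDeviceProcAddr above resolve names against the intercept tables
// below in priority order (core instance, core device, KHR swapchain, KHR surface) and only
// fall through to the next layer's dispatch table when this layer does not wrap the
// function; returning the layer's own pointer is what splices these validation entry points
// into the call chain.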
11775static PFN_vkVoidFunction
11776intercept_core_instance_command(const char *name) {
11777    static const struct {
11778        const char *name;
11779        PFN_vkVoidFunction proc;
11780    } core_instance_commands[] = {
11781        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
11782        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
11783        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
11784        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
11785        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
11786        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
11787        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
11788        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
11789        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
11790        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
11791        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
11792    };
11793
11794    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
11795        if (!strcmp(core_instance_commands[i].name, name))
11796            return core_instance_commands[i].proc;
11797    }
11798
11799    return nullptr;
11800}
11801
11802static PFN_vkVoidFunction
11803intercept_core_device_command(const char *name) {
11804    static const struct {
11805        const char *name;
11806        PFN_vkVoidFunction proc;
11807    } core_device_commands[] = {
11808        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
11809        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
11810        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
11811        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
11812        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
11813        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
11814        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
11815        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
11816        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
11817        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
11818        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
11819        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
11820        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
11821        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
11822        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
11823        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
11824        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
11825        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
11826        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
11827        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
11828        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
11829        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
11830        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
11831        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
11832        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
11833        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
11834        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
11835        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
11836        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
11837        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
11838        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
11839        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
11840        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
11841        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
11842        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
11843        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
11844        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
11845        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
11846        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
11847        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
11848        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
11849        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
11850        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
11851        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
11852        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
11853        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
11854        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
11855        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
11856        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
11857        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
11858        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
11859        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
11860        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
11861        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
11862        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
11863        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
11864        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
11865        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
11866        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
11867        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
11868        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
11869        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
11870        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
11871        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
11872        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
11873        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
11874        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
11875        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
11876        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
11877        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
11878        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
11879        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
11880        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
11881        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
11882        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
11883        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
11884        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
11885        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
11886        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
11887        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
11888        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
11889        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
11890        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
11891        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
11892        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
11893        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
11894        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
11895        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
11896        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
11897        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
11898        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
11899        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
11900        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
11901        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
11902        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
11903        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
11904        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
11905        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
11906        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
11907        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
11908        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
11909        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
11910        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
11911        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
11912        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
11913        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
11914        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
11915        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
11916        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
11917        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
11918        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
11919        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
11920        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
11921        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
11922        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
11923        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
11924    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

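// Map VK_KHR_swapchain / VK_KHR_display_swapchain entry point names to
// intercept routines.  When a device is supplied, a command is exposed only
// if the corresponding WSI extension was actually enabled at device-creation
// time (wsi_enabled, wsi_display_swapchain_enabled).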
static PFN_vkVoidFunction
intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        {"vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR)},
        {"vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR)},
        {"vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR)},
        {"vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR)},
        {"vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR)},
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data && !dev_data->device_extensions.wsi_display_swapchain_enabled)
        return nullptr;

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

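// Map instance-level WSI surface entry point names to intercept routines.
// Each table entry also carries a pointer-to-member (bool instance_layer_data::*)
// naming the flag that records whether the matching surface extension was
// enabled on the instance; the lookup below dereferences it as
//
//     instance_data->*(khr_surface_commands[i].enable)
//
// so a command is handed out only when its extension was requested.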
static PFN_vkVoidFunction
intercept_khr_surface_command(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } khr_surface_commands[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
            &instance_layer_data::androidSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
            &instance_layer_data::mirSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
            &instance_layer_data::waylandSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
            &instance_layer_data::win32SurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
            &instance_layer_data::xcbSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
            &instance_layer_data::xlibSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XLIB_KHR
        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
            &instance_layer_data::surfaceExtensionEnabled},
    };

    instance_layer_data *instance_data = nullptr;
    if (instance) {
        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
        if (!strcmp(khr_surface_commands[i].name, name)) {
            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
                return nullptr;
            return khr_surface_commands[i].proc;
        }
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance,
                                VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0: just wrappers, since this library implements
// only a single layer

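// Under interface version 0 the loader resolves these entry points directly
// from the layer library's exported symbols (later interface versions added a
// negotiation entry point), so each wrapper simply forwards into the
// core_validation namespace.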
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}